[rfc][patch 3/3] block: non-atomic queue_flags accessors

2007-12-14  Nick Piggin

Introduce queue_flag_* accessors to set and clear queue_flags; these include
debug checks to ensure queue_lock is held. Non-checking versions are provided
where it is known that there can be no parallelism on queue_flags.
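
For reference, the accessors used in the hunks below would look roughly
like the sketch that follows. This is an illustrative reconstruction,
not the series' actual definition: the helpers are assumed to be added
to include/linux/blkdev.h earlier in the series, and the exact form of
the debug check is an assumption.

/*
 * Sketch of the queue_flag accessors.  The checked variants warn if
 * queue_lock is not held; the _unlocked variants skip the check for
 * paths where no parallelism on queue_flags is possible.  Note that
 * spin_is_locked() is only meaningful on SMP, so a real implementation
 * would need a !CONFIG_SMP fallback for this check.
 */
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

With these in place, a call such as queue_flag_set(QUEUE_FLAG_PLUGGED, q)
both documents and verifies the locking rule that the open-coded
__set_bit()/__clear_bit() calls being replaced relied on implicitly.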

Index: linux-2.6/block/elevator.c
===================================================================
--- linux-2.6.orig/block/elevator.c
+++ linux-2.6/block/elevator.c
@@ -1032,7 +1032,7 @@ static int elevator_switch(struct reques
 */
spin_lock_irq(q->queue_lock);
 
-   __set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+   queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
elv_drain_elevator(q);
 
@@ -1067,7 +1067,7 @@ static int elevator_switch(struct reques
 */
elevator_exit(old_elevator);
spin_lock_irq(q->queue_lock);
-   __clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
spin_unlock_irq(q->queue_lock);
 
return 1;
@@ -1082,7 +1082,7 @@ fail_register:
elv_register_queue(q);
 
spin_lock_irq(q->queue_lock);
-   __clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
spin_unlock_irq(q->queue_lock);
 
return 0;
Index: linux-2.6/block/ll_rw_blk.c
===================================================================
--- linux-2.6.orig/block/ll_rw_blk.c
+++ linux-2.6/block/ll_rw_blk.c
@@ -720,7 +720,7 @@ void blk_queue_stack_limits(struct reque
t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-   __clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -823,7 +823,7 @@ static void __blk_queue_free_tags(struct
__blk_free_tags(bqt);
 
q->queue_tags = NULL;
-   __clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 
@@ -852,7 +852,7 @@ EXPORT_SYMBOL(blk_free_tags);
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-   __clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 EXPORT_SYMBOL(blk_queue_free_tags);
@@ -942,7 +942,7 @@ int blk_queue_init_tags(struct request_q
} else if (q->queue_tags) {
if ((rc = blk_queue_resize_tags(q, depth)))
return rc;
-   __set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+   queue_flag_set(QUEUE_FLAG_QUEUED, q);
return 0;
} else
atomic_inc(&tags->refcnt);
@@ -951,7 +951,7 @@ int blk_queue_init_tags(struct request_q
 * assign it, all done
 */
q->queue_tags = tags;
-   __set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+   queue_flag_set(QUEUE_FLAG_QUEUED, q);
INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
 fail:
@@ -1558,7 +1558,7 @@ void blk_plug_device(struct request_queu
return;
 
if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-   __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+   queue_flag_set(QUEUE_FLAG_PLUGGED, q);
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
@@ -1576,7 +1576,7 @@ int blk_remove_plug(struct request_queue
 
if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
return 0;
-   __clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 
del_timer(&q->unplug_timer);
return 1;
@@ -1674,16 +1674,16 @@ void blk_start_queue(struct request_queu
 {
WARN_ON(!irqs_disabled());
 
-   __clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
/*
 * one level of recursion is ok and is much faster than kicking
 * the unplug handling
 */
if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-   __set_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+   queue_flag_set(QUEUE_FLAG_REENTER, q);
q->request_fn(q);
-   __clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+   queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
@@ -1709,7 +1709,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
blk_remove_plug(q);
-   __set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+   queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -1748,9 +1748,9 @@ void __blk_run_queue(struct request_queu
 */
if (!elv_queue_empty(q)) {
if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-   __set_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
