[PATCH 26/48] writeback, blkcg: restructure blk_{set|clear}_queue_congested()

2015-03-22 Thread Tejun Heo
blk_{set|clear}_queue_congested() take @q and set or clear,
respectively, the congestion state of its bdi's root wb.  Because bdi
used to be able to handle congestion state only on the root wb, the
callers of those functions tested whether the congestion is on the
root blkcg and skipped if not.

This is cumbersome and makes implementation of per cgroup
bdi_writeback congestion state propagation difficult.  This patch
renames blk_{set|clear}_queue_congested() to
blk_{set|clear}_congested(), and makes them take request_list instead
of request_queue and test whether the specified request_list is the
root one before updating bdi_writeback congestion state.  This makes
the tests in the callers unnecessary and simplifies them.

As there are no external users of these functions, the definitions are
moved from include/linux/blkdev.h to block/blk-core.c.

This patch doesn't introduce any noticeable behavior difference.

Signed-off-by: Tejun Heo 
Cc: Jens Axboe 
Cc: Jan Kara 
Cc: Vivek Goyal 
---
 block/blk-core.c   | 62 ++
 include/linux/blkdev.h | 19 
 2 files changed, 37 insertions(+), 44 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index c44018a..cad26e3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -63,6 +63,28 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+static void blk_clear_congested(struct request_list *rl, int sync)
+{
+   if (rl != &rl->q->root_rl)
+   return;
+#ifdef CONFIG_CGROUP_WRITEBACK
+   clear_wb_congested(rl->blkg->wb_congested, sync);
+#else
+   clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+#endif
+}
+
+static void blk_set_congested(struct request_list *rl, int sync)
+{
+   if (rl != &rl->q->root_rl)
+   return;
+#ifdef CONFIG_CGROUP_WRITEBACK
+   set_wb_congested(rl->blkg->wb_congested, sync);
+#else
+   set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+#endif
+}
+
 void blk_queue_congestion_threshold(struct request_queue *q)
 {
int nr;
@@ -827,13 +849,8 @@ static void __freed_request(struct request_list *rl, int 
sync)
 {
struct request_queue *q = rl->q;
 
-   /*
-* bdi isn't aware of blkcg yet.  As all async IOs end up root
-* blkcg anyway, just use root blkcg state.
-*/
-   if (rl == &q->root_rl &&
-   rl->count[sync] < queue_congestion_off_threshold(q))
-   blk_clear_queue_congested(q, sync);
+   if (rl->count[sync] < queue_congestion_off_threshold(q))
+   blk_clear_congested(rl, sync);
 
if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
@@ -866,25 +883,25 @@ static void freed_request(struct request_list *rl, 
unsigned int flags)
 int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
 {
struct request_list *rl;
+   int on_thresh, off_thresh;
 
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
+   on_thresh = queue_congestion_on_threshold(q);
+   off_thresh = queue_congestion_off_threshold(q);
 
-   /* congestion isn't cgroup aware and follows root blkcg for now */
-   rl = &q->root_rl;
-
-   if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
-   blk_set_queue_congested(q, BLK_RW_SYNC);
-   else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
-   blk_clear_queue_congested(q, BLK_RW_SYNC);
+   blk_queue_for_each_rl(rl, q) {
+   if (rl->count[BLK_RW_SYNC] >= on_thresh)
+   blk_set_congested(rl, BLK_RW_SYNC);
+   else if (rl->count[BLK_RW_SYNC] < off_thresh)
+   blk_clear_congested(rl, BLK_RW_SYNC);
 
-   if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
-   blk_set_queue_congested(q, BLK_RW_ASYNC);
-   else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
-   blk_clear_queue_congested(q, BLK_RW_ASYNC);
+   if (rl->count[BLK_RW_ASYNC] >= on_thresh)
+   blk_set_congested(rl, BLK_RW_ASYNC);
+   else if (rl->count[BLK_RW_ASYNC] < off_thresh)
+   blk_clear_congested(rl, BLK_RW_ASYNC);
 
-   blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
@@ -994,12 +1011,7 @@ static struct request *__get_request(struct request_list 
*rl, int rw_flags,
}
}
}
-   /*
-* bdi isn't aware of blkcg yet.  As all async IOs end up
-* root blkcg anyway, just use root blkcg state.
-*/
-   if (rl == &q->root_rl)
-   

[PATCH 26/48] writeback, blkcg: restructure blk_{set|clear}_queue_congested()

2015-03-22 Thread Tejun Heo
blk_{set|clear}_queue_congested() take @q and set or clear,
respectively, the congestion state of its bdi's root wb.  Because bdi
used to be able to handle congestion state only on the root wb, the
callers of those functions tested whether the congestion is on the
root blkcg and skipped if not.

This is cumbersome and makes implementation of per cgroup
bdi_writeback congestion state propagation difficult.  This patch
renames blk_{set|clear}_queue_congested() to
blk_{set|clear}_congested(), and makes them take request_list instead
of request_queue and test whether the specified request_list is the
root one before updating bdi_writeback congestion state.  This makes
the tests in the callers unnecessary and simplifies them.

As there are no external users of these functions, the definitions are
moved from include/linux/blkdev.h to block/blk-core.c.

This patch doesn't introduce any noticeable behavior difference.

Signed-off-by: Tejun Heo <t...@kernel.org>
Cc: Jens Axboe <ax...@kernel.dk>
Cc: Jan Kara <j...@suse.cz>
Cc: Vivek Goyal <vgo...@redhat.com>
---
 block/blk-core.c   | 62 ++
 include/linux/blkdev.h | 19 
 2 files changed, 37 insertions(+), 44 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index c44018a..cad26e3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -63,6 +63,28 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+static void blk_clear_congested(struct request_list *rl, int sync)
+{
+   if (rl != &rl->q->root_rl)
+   return;
+#ifdef CONFIG_CGROUP_WRITEBACK
+   clear_wb_congested(rl->blkg->wb_congested, sync);
+#else
+   clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+#endif
+}
+
+static void blk_set_congested(struct request_list *rl, int sync)
+{
+   if (rl != &rl->q->root_rl)
+   return;
+#ifdef CONFIG_CGROUP_WRITEBACK
+   set_wb_congested(rl->blkg->wb_congested, sync);
+#else
+   set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+#endif
+}
+
 void blk_queue_congestion_threshold(struct request_queue *q)
 {
int nr;
@@ -827,13 +849,8 @@ static void __freed_request(struct request_list *rl, int 
sync)
 {
	struct request_queue *q = rl->q;
 
-   /*
-* bdi isn't aware of blkcg yet.  As all async IOs end up root
-* blkcg anyway, just use root blkcg state.
-*/
-   if (rl == &q->root_rl &&
-   rl->count[sync] < queue_congestion_off_threshold(q))
-   blk_clear_queue_congested(q, sync);
+   if (rl->count[sync] < queue_congestion_off_threshold(q))
+   blk_clear_congested(rl, sync);
 
	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
@@ -866,25 +883,25 @@ static void freed_request(struct request_list *rl, 
unsigned int flags)
 int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
 {
struct request_list *rl;
+   int on_thresh, off_thresh;
 
	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
+   on_thresh = queue_congestion_on_threshold(q);
+   off_thresh = queue_congestion_off_threshold(q);
 
-   /* congestion isn't cgroup aware and follows root blkcg for now */
-   rl = &q->root_rl;
-
-   if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
-   blk_set_queue_congested(q, BLK_RW_SYNC);
-   else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
-   blk_clear_queue_congested(q, BLK_RW_SYNC);
+   blk_queue_for_each_rl(rl, q) {
+   if (rl->count[BLK_RW_SYNC] >= on_thresh)
+   blk_set_congested(rl, BLK_RW_SYNC);
+   else if (rl->count[BLK_RW_SYNC] < off_thresh)
+   blk_clear_congested(rl, BLK_RW_SYNC);
 
-   if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
-   blk_set_queue_congested(q, BLK_RW_ASYNC);
-   else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
-   blk_clear_queue_congested(q, BLK_RW_ASYNC);
+   if (rl->count[BLK_RW_ASYNC] >= on_thresh)
+   blk_set_congested(rl, BLK_RW_ASYNC);
+   else if (rl->count[BLK_RW_ASYNC] < off_thresh)
+   blk_clear_congested(rl, BLK_RW_ASYNC);
 
-   blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
@@ -994,12 +1011,7 @@ static struct request *__get_request(struct request_list 
*rl, int rw_flags,
}
}
}
-   /*
-* bdi isn't aware of blkcg yet.  As all async IOs end up
-* root blkcg anyway, just use root blkcg state.
-*/
-   if (rl == &q->root_rl)
-