This patch introduces a heuristic that reduces latency when the
I/O-request pool is saturated. It achieves this goal by disabling
device idling for non-weight-raised queues when there are
weight-raised queues with pending or in-flight requests. As explained
in more detail in the comment on the function bfq_bfqq_may_idle(),
this reduces the rate at which processes associated with
non-weight-raised queues grab requests from the pool, thereby
increasing the probability that processes associated with
weight-raised queues get a request immediately (or at least soon) when
they need one.

Signed-off-by: Paolo Valente <paolo.vale...@linaro.org>
Signed-off-by: Arianna Avanzini <avanzini.aria...@gmail.com>
---
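Note for reviewers (illustrative sketch, not part of the patch to
apply): the heuristic boils down to the check below. The sketch uses
simplified, standalone parameters instead of the bfq_data/bfq_queue
fields touched by the diff; the names mirror the variables used in
bfq_bfqq_may_idle() further down.

	#include <stdbool.h>

	/*
	 * Idle the device for this queue only if idling boosts throughput
	 * and no weight-raised queue is currently busy, or if idling is
	 * needed anyway to preserve service guarantees (asymmetric
	 * scenario). Only sync queues are ever idled for.
	 */
	static bool should_idle(bool sync, bool hw_tag, bool io_bound,
				int wr_busy_queues, bool asymmetric_scenario)
	{
		bool idling_boosts_thr = !hw_tag || io_bound;
		bool idling_boosts_thr_without_issues =
			idling_boosts_thr && wr_busy_queues == 0;

		return sync &&
			(idling_boosts_thr_without_issues ||
			 asymmetric_scenario);
	}
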
 block/bfq.h         |  2 ++
 block/cfq-iosched.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 53 insertions(+), 2 deletions(-)

diff --git a/block/bfq.h b/block/bfq.h
index 68969ba..2960e5d 100644
--- a/block/bfq.h
+++ b/block/bfq.h
@@ -299,6 +299,7 @@ enum bfq_device_speed {
  * @root_group: root bfq_group for the device.
  * @busy_queues: number of bfq_queues containing requests (including the
  *              queue in service, even if it is idling).
+ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
  * @queued: number of queued requests.
  * @rq_in_driver: number of requests dispatched and waiting for completion.
  * @sync_flight: number of sync requests in the driver.
@@ -368,6 +369,7 @@ struct bfq_data {
        struct bfq_group *root_group;
 
        int busy_queues;
+       int wr_busy_queues;
        int queued;
        int rq_in_driver;
        int sync_flight;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 40feb47..0539df4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1228,6 +1228,9 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
        bfqd->busy_queues--;
 
+       if (bfqq->wr_coeff > 1)
+               bfqd->wr_busy_queues--;
+
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        bfqg_stats_update_dequeue(bfqq_group(bfqq));
 #endif
@@ -1246,6 +1249,9 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
        bfq_mark_bfqq_busy(bfqq);
        bfqd->busy_queues++;
+
+       if (bfqq->wr_coeff > 1)
+               bfqd->wr_busy_queues++;
 }
 
 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
@@ -2722,6 +2728,7 @@ add_bfqq_busy:
                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
 
+                       bfqd->wr_busy_queues++;
                        entity->prio_changed = 1;
                        bfq_log_bfqq(bfqd, bfqq,
                            "non-idle wrais starting at %lu, rais_max_time %u",
@@ -2889,6 +2896,8 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq,
 /* Must be called with bfqq != NULL */
 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
 {
+       if (bfq_bfqq_busy(bfqq))
+               bfqq->bfqd->wr_busy_queues--;
        bfqq->wr_coeff = 1;
        bfqq->wr_cur_max_time = 0;
        /* Trigger a weight change on the next activation of the queue */
@@ -3677,7 +3686,8 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 {
        struct bfq_data *bfqd = bfqq->bfqd;
-       bool idling_boosts_thr, asymmetric_scenario;
+       bool idling_boosts_thr, idling_boosts_thr_without_issues,
+               asymmetric_scenario;
 
        /*
         * The next variable takes into account the cases where idling
@@ -3697,6 +3707,44 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
        idling_boosts_thr = !bfqd->hw_tag || bfq_bfqq_IO_bound(bfqq);
 
        /*
+        * The value of the next variable,
+        * idling_boosts_thr_without_issues, is equal to that of
+        * idling_boosts_thr, unless a special case holds. In this
+        * special case, described below, idling may cause problems to
+        * weight-raised queues.
+        *
+        * When the request pool is saturated (e.g., in the presence
+        * of write hogs), if the processes associated with
+        * non-weight-raised queues ask for requests at a lower rate,
+        * then processes associated with weight-raised queues have a
+        * higher probability to get a request from the pool
+        * immediately (or at least soon) when they need one. Thus
+        * they have a higher probability to actually get a fraction
+        * of the device throughput proportional to their high
+        * weight. This is especially true with NCQ-capable drives,
+        * which enqueue several requests in advance, and further
+        * reorder internally-queued requests.
+        *
+        * For this reason, we force to false the value of
+        * idling_boosts_thr_without_issues if there are weight-raised
+        * busy queues. In this case, and if bfqq is not weight-raised,
+        * this guarantees that the device is not idled for bfqq (if,
+        * instead, bfqq is weight-raised, then idling will be
+        * guaranteed by another variable, see below). Combined with
+        * the timestamping rules of BFQ (see [1] for details), this
+        * behavior causes bfqq, and hence any sync non-weight-raised
+        * queue, to get a lower number of requests served, and thus
+        * to ask for a lower number of requests from the request
+        * pool, before the busy weight-raised queues get served
+        * again. This often mitigates starvation problems in the
+        * presence of heavy write workloads and NCQ, thereby
+        * guaranteeing a higher application and system responsiveness
+        * in these hostile scenarios.
+        */
+       idling_boosts_thr_without_issues = idling_boosts_thr &&
+               bfqd->wr_busy_queues == 0;
+
+       /*
         * There is then a case where idling must be performed not for
         * throughput concerns, but to preserve service guarantees. To
         * introduce it, we can note that allowing the drive to
@@ -3770,7 +3818,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
         *    is necessary to preserve service guarantees.
         */
        return bfq_bfqq_sync(bfqq) &&
-               (idling_boosts_thr || asymmetric_scenario);
+               (idling_boosts_thr_without_issues || asymmetric_scenario);
 }
 
 /*
@@ -4975,6 +5023,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
                                              * high-definition compressed
                                              * video.
                                              */
+       bfqd->wr_busy_queues = 0;
 
        /*
         * Begin by assuming, optimistically, that the device peak rate is
-- 
1.9.1
