commit:     5e0dbb42f182a56251c6e532d2fe2b922305a021
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 30 12:43:15 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 30 12:43:15 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5e0dbb42

BFQ v7r8

 0000_README                                        |  12 +-
 ...oups-kconfig-build-bits-for-BFQ-v7r8-3.18.patch |   8 +-
 ...ntroduce-the-BFQ-v7r8-I-O-sched-for-3.18.patch1 | 198 ++++++++++-----------
 ...arly-Queue-Merge-EQM-to-BFQ-v7r8-for-3.18.patch |  96 +++++-----
 4 files changed, 149 insertions(+), 165 deletions(-)

diff --git a/0000_README b/0000_README
index 61cb27a..21802d2 100644
--- a/0000_README
+++ b/0000_README
@@ -155,17 +155,17 @@ Patch:  5000_enable-additional-cpu-optimizations-for-gcc.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc optimizations for additional CPUs.
 
-Patch:  5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.18.patch
+Patch:  5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-3.18.patch
 From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
-Desc:   BFQ v7r7 patch 1 for 3.18: Build, cgroups and kconfig bits
+Desc:   BFQ v7r8 patch 1 for 3.18: Build, cgroups and kconfig bits
 
-Patch:  5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.18.patch1
+Patch:  5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-3.18.patch1
 From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
-Desc:   BFQ v7r7 patch 2 for 3.18: BFQ Scheduler
+Desc:   BFQ v7r8 patch 2 for 3.18: BFQ Scheduler
 
-Patch:  5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.18.0.patch
+Patch:  5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-3.18.0.patch
 From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
-Desc:   BFQ v7r7 patch 3 for 3.18: Early Queue Merge (EQM)
+Desc:   BFQ v7r8 patch 3 for 3.18: Early Queue Merge (EQM)
 
 Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/

diff --git a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.18.patch b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-3.18.patch
similarity index 96%
rename from 5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.18.patch
rename to 5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-3.18.patch
index 9f0a7b3..f563b15 100644
--- a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.18.patch
+++ b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-3.18.patch
@@ -1,7 +1,7 @@
-From e5cfe6a861feffa55dda77cd60bdb571fc7c3065 Mon Sep 17 00:00:00 2001
+From cd515f7eea26e74f58a1f49562ff84cd115fce2e Mon Sep 17 00:00:00 2001
 From: Paolo Valente <paolo.vale...@unimore.it>
-Date: Mon, 8 Dec 2014 16:04:25 +0100
-Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7r7-3.18
+Date: Sat, 6 Jun 2015 17:56:31 +0200
+Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7r8-3.18.0
 
 Update Kconfig.iosched and do the related Makefile changes to include
 kernel configuration options for BFQ. Also add the bfqio controller
@@ -100,5 +100,5 @@ index 98c4f9b..13b010d 100644
  SUBSYS(perf_event)
  #endif
 -- 
-2.1.3
+2.1.4
 

diff --git a/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.18.patch1 b/5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-3.18.patch1
similarity index 98%
rename from 5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.18.patch1
rename to 5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-3.18.patch1
index dcd10f7..7cf1815 100644
--- a/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.18.patch1
+++ b/5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-3.18.patch1
@@ -1,9 +1,9 @@
-From df04e0bc0d8b72775a74d45e355af9433f8e420e Mon Sep 17 00:00:00 2001
+From 4c8586fb29eb664cb379a179c0f851c693d49ce9 Mon Sep 17 00:00:00 2001
 From: Paolo Valente <paolo.vale...@unimore.it>
 Date: Thu, 9 May 2013 19:10:02 +0200
-Subject: [PATCH 2/3] block: introduce the BFQ-v7r7 I/O sched for 3.18
+Subject: [PATCH 2/3] block: introduce the BFQ-v7r8 I/O sched for 3.18.0
 
-Add the BFQ-v7r7 I/O scheduler to 3.18.
+Add the BFQ-v7r8 I/O scheduler to 3.18.0.
 The general structure is borrowed from CFQ, as much of the code for
 handling I/O contexts. Over time, several useful features have been
 ported from CFQ as well (details in the changelog in README.BFQ). A
@@ -56,12 +56,12 @@ until it expires.
 Signed-off-by: Paolo Valente <paolo.vale...@unimore.it>
 Signed-off-by: Arianna Avanzini <avanzini.aria...@gmail.com>
 ---
- block/bfq-cgroup.c  |  936 ++++++++++++
+ block/bfq-cgroup.c  |  936 +++++++++++++
  block/bfq-ioc.c     |   36 +
- block/bfq-iosched.c | 3902 +++++++++++++++++++++++++++++++++++++++++++++++++++
- block/bfq-sched.c   | 1214 ++++++++++++++++
- block/bfq.h         |  775 ++++++++++
- 5 files changed, 6863 insertions(+)
+ block/bfq-iosched.c | 3898 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ block/bfq-sched.c   | 1208 ++++++++++++++++
+ block/bfq.h         |  771 ++++++++++
+ 5 files changed, 6849 insertions(+)
  create mode 100644 block/bfq-cgroup.c
  create mode 100644 block/bfq-ioc.c
  create mode 100644 block/bfq-iosched.c
@@ -1054,10 +1054,10 @@ index 0000000..7f6b000
 +}
 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
 new file mode 100644
-index 0000000..97ee934
+index 0000000..773b2ee
 --- /dev/null
 +++ b/block/bfq-iosched.c
-@@ -0,0 +1,3902 @@
+@@ -0,0 +1,3898 @@
 +/*
 + * Budget Fair Queueing (BFQ) disk scheduler.
 + *
@@ -1130,9 +1130,6 @@ index 0000000..97ee934
 +#include "bfq.h"
 +#include "blk.h"
 +
-+/* Max number of dispatches in one round of service. */
-+static const int bfq_quantum = 4;
-+
 +/* Expiration time of sync (0) and async (1) requests, in jiffies. */
 +static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 +
@@ -1240,6 +1237,20 @@ index 0000000..97ee934
 +#define bfq_sample_valid(samples)     ((samples) > 80)
 +
 +/*
++ * The following macro groups conditions that need to be evaluated when
++ * checking if existing queues and groups form a symmetric scenario
++ * and therefore idling can be reduced or disabled for some of the
++ * queues. See the comment to the function bfq_bfqq_must_not_expire()
++ * for further details.
++ */
++#ifdef CONFIG_CGROUP_BFQIO
++#define symmetric_scenario      (!bfqd->active_numerous_groups && \
++                                 !bfq_differentiated_weights(bfqd))
++#else
++#define symmetric_scenario      (!bfq_differentiated_weights(bfqd))
++#endif
++
++/*
 + * We regard a request as SYNC, if either it's a read or has the SYNC bit
 + * set (in which case it could also be a direct WRITE).
 + */
@@ -1429,7 +1440,6 @@ index 0000000..97ee934
 + */
 +static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
 +{
-+      BUG_ON(!bfqd->hw_tag);
 +      /*
 +       * For weights to differ, at least one of the trees must contain
 +       * at least two nodes.
@@ -1466,19 +1476,19 @@ index 0000000..97ee934
 +      struct rb_node **new = &(root->rb_node), *parent = NULL;
 +
 +      /*
-+       * Do not insert if:
-+       * - the device does not support queueing;
-+       * - the entity is already associated with a counter, which happens if:
-+       *   1) the entity is associated with a queue, 2) a request arrival
-+       *   has caused the queue to become both non-weight-raised, and hence
-+       *   change its weight, and backlogged; in this respect, each
-+       *   of the two events causes an invocation of this function,
-+       *   3) this is the invocation of this function caused by the second
-+       *   event. This second invocation is actually useless, and we handle
-+       *   this fact by exiting immediately. More efficient or clearer
-+       *   solutions might possibly be adopted.
++       * Do not insert if the entity is already associated with a
++       * counter, which happens if:
++       *   1) the entity is associated with a queue,
++       *   2) a request arrival has caused the queue to become both
++       *      non-weight-raised, and hence change its weight, and
++       *      backlogged; in this respect, each of the two events
++       *      causes an invocation of this function,
++       *   3) this is the invocation of this function caused by the
++       *      second event. This second invocation is actually useless,
++       *      and we handle this fact by exiting immediately. More
++       *      efficient or clearer solutions might possibly be adopted.
 +       */
-+      if (!bfqd->hw_tag || entity->weight_counter)
++      if (entity->weight_counter)
 +              return;
 +
 +      while (*new) {
@@ -1517,14 +1527,6 @@ index 0000000..97ee934
 +                                  struct bfq_entity *entity,
 +                                  struct rb_root *root)
 +{
-+      /*
-+       * Check whether the entity is actually associated with a counter.
-+       * In fact, the device may not be considered NCQ-capable for a while,
-+       * which implies that no insertion in the weight trees is performed,
-+       * after which the device may start to be deemed NCQ-capable, and hence
-+       * this function may start to be invoked. This may cause the function
-+       * to be invoked for entities that are not associated with any counter.
-+       */
 +      if (!entity->weight_counter)
 +              return;
 +
@@ -2084,7 +2086,8 @@ index 0000000..97ee934
 +              bfq_updated_next_req(bfqd, bfqq);
 +      }
 +
-+      list_del_init(&rq->queuelist);
++      if (rq->queuelist.prev != &rq->queuelist)
++              list_del_init(&rq->queuelist);
 +      BUG_ON(bfqq->queued[sync] == 0);
 +      bfqq->queued[sync]--;
 +      bfqd->queued--;
@@ -2159,14 +2162,22 @@ index 0000000..97ee934
 +static void bfq_merged_requests(struct request_queue *q, struct request *rq,
 +                              struct request *next)
 +{
-+      struct bfq_queue *bfqq = RQ_BFQQ(rq);
++      struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
 +
 +      /*
-+       * Reposition in fifo if next is older than rq.
++       * If next and rq belong to the same bfq_queue and next is older
++       * than rq, then reposition rq in the fifo (by substituting next
++       * with rq). Otherwise, if next and rq belong to different
++       * bfq_queues, never reposition rq: in fact, we would have to
++       * reposition it with respect to next's position in its own fifo,
++       * which would most certainly be too expensive with respect to
++       * the benefits.
 +       */
-+      if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++      if (bfqq == next_bfqq &&
++          !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
 +          time_before(next->fifo_time, rq->fifo_time)) {
-+              list_move(&rq->queuelist, &next->queuelist);
++              list_del_init(&rq->queuelist);
++              list_replace_init(&next->queuelist, &rq->queuelist);
 +              rq->fifo_time = next->fifo_time;
 +      }
 +
@@ -2444,14 +2455,16 @@ index 0000000..97ee934
 +       */
 +      sl = bfqd->bfq_slice_idle;
 +      /*
-+       * Unless the queue is being weight-raised, grant only minimum idle
-+       * time if the queue either has been seeky for long enough or has
-+       * already proved to be constantly seeky.
++       * Unless the queue is being weight-raised or the scenario is
++       * asymmetric, grant only minimum idle time if the queue either
++       * has been seeky for long enough or has already proved to be
++       * constantly seeky.
 +       */
 +      if (bfq_sample_valid(bfqq->seek_samples) &&
 +          ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
 +                                bfq_max_budget(bfqq->bfqd) / 8) ||
-+            bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
++            bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1 &&
++          symmetric_scenario)
 +              sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
 +      else if (bfqq->wr_coeff > 1)
 +              sl = sl * 3;
@@ -3265,12 +3278,6 @@ index 0000000..97ee934
 +static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
 +{
 +      struct bfq_data *bfqd = bfqq->bfqd;
-+#ifdef CONFIG_CGROUP_BFQIO
-+#define symmetric_scenario      (!bfqd->active_numerous_groups && \
-+                                 !bfq_differentiated_weights(bfqd))
-+#else
-+#define symmetric_scenario      (!bfq_differentiated_weights(bfqd))
-+#endif
 +#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
 +                                 bfqd->busy_in_flight_queues == \
 +                                 bfqd->const_seeky_busy_in_flight_queues)
@@ -3286,13 +3293,12 @@ index 0000000..97ee934
 + */
 +#define cond_for_expiring_non_wr  (bfqd->hw_tag && \
 +                                 (bfqd->wr_busy_queues > 0 || \
-+                                  (symmetric_scenario && \
-+                                   (blk_queue_nonrot(bfqd->queue) || \
-+                                    cond_for_seeky_on_ncq_hdd))))
++                                  (blk_queue_nonrot(bfqd->queue) || \
++                                    cond_for_seeky_on_ncq_hdd)))
 +
 +      return bfq_bfqq_sync(bfqq) &&
 +              !cond_for_expiring_in_burst &&
-+              (bfqq->wr_coeff > 1 ||
++              (bfqq->wr_coeff > 1 || !symmetric_scenario ||
 +               (bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) &&
 +                !cond_for_expiring_non_wr)
 +      );
@@ -3390,9 +3396,9 @@ index 0000000..97ee934
 +      }
 +
 +      /*
-+       * No requests pending.  If the in-service queue still has requests
-+       * in flight (possibly waiting for a completion) or is idling for a
-+       * new request, then keep it.
++       * No requests pending. However, if the in-service queue is idling
++       * for a new request, or has requests waiting for a completion and
++       * may idle after their completion, then keep it anyway.
 +       */
 +      if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
 +          (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
@@ -3595,14 +3601,13 @@ index 0000000..97ee934
 +      if (bfqq == NULL)
 +              return 0;
 +
-+      max_dispatch = bfqd->bfq_quantum;
 +      if (bfq_class_idle(bfqq))
 +              max_dispatch = 1;
 +
 +      if (!bfq_bfqq_sync(bfqq))
 +              max_dispatch = bfqd->bfq_max_budget_async_rq;
 +
-+      if (bfqq->dispatched >= max_dispatch) {
++      if (!bfq_bfqq_sync(bfqq) && bfqq->dispatched >= max_dispatch) {
 +              if (bfqd->busy_queues > 1)
 +                      return 0;
 +              if (bfqq->dispatched >= 4 * max_dispatch)
@@ -3618,8 +3623,8 @@ index 0000000..97ee934
 +      if (!bfq_dispatch_request(bfqd, bfqq))
 +              return 0;
 +
-+      bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
-+                      bfqq->pid, max_dispatch);
++      bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++                      bfq_bfqq_sync(bfqq) ? "sync" : "async");
 +
 +      return 1;
 +}
@@ -3724,14 +3729,11 @@ index 0000000..97ee934
 + * Update the entity prio values; note that the new values will not
 + * be used until the next (re)activation.
 + */
-+static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 +{
 +      struct task_struct *tsk = current;
 +      int ioprio_class;
 +
-+      if (!bfq_bfqq_prio_changed(bfqq))
-+              return;
-+
 +      ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
 +      switch (ioprio_class) {
 +      default:
@@ -3761,17 +3763,16 @@ index 0000000..97ee934
 +
 +      if (bfqq->entity.new_ioprio < 0 ||
 +          bfqq->entity.new_ioprio >= IOPRIO_BE_NR) {
-+              printk(KERN_CRIT "bfq_init_prio_data: new_ioprio %d\n",
++              printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n",
 +                               bfqq->entity.new_ioprio);
 +              BUG();
 +      }
 +
++      bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->entity.new_ioprio);
 +      bfqq->entity.ioprio_changed = 1;
-+
-+      bfq_clear_bfqq_prio_changed(bfqq);
 +}
 +
-+static void bfq_changed_ioprio(struct bfq_io_cq *bic)
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic)
 +{
 +      struct bfq_data *bfqd;
 +      struct bfq_queue *bfqq, *new_bfqq;
@@ -3788,6 +3789,8 @@ index 0000000..97ee934
 +      if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
 +              goto out;
 +
++      bic->ioprio = ioprio;
++
 +      bfqq = bic->bfqq[BLK_RW_ASYNC];
 +      if (bfqq != NULL) {
 +              bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
@@ -3797,7 +3800,7 @@ index 0000000..97ee934
 +              if (new_bfqq != NULL) {
 +                      bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
 +                      bfq_log_bfqq(bfqd, bfqq,
-+                                   "changed_ioprio: bfqq %p %d",
++                                   "check_ioprio_change: bfqq %p %d",
 +                                   bfqq, atomic_read(&bfqq->ref));
 +                      bfq_put_queue(bfqq);
 +              }
@@ -3805,16 +3808,14 @@ index 0000000..97ee934
 +
 +      bfqq = bic->bfqq[BLK_RW_SYNC];
 +      if (bfqq != NULL)
-+              bfq_mark_bfqq_prio_changed(bfqq);
-+
-+      bic->ioprio = ioprio;
++              bfq_set_next_ioprio_data(bfqq, bic);
 +
 +out:
 +      bfq_put_bfqd_unlock(bfqd, &flags);
 +}
 +
 +static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-+                        pid_t pid, int is_sync)
++                        struct bfq_io_cq *bic, pid_t pid, int is_sync)
 +{
 +      RB_CLEAR_NODE(&bfqq->entity.rb_node);
 +      INIT_LIST_HEAD(&bfqq->fifo);
@@ -3823,7 +3824,8 @@ index 0000000..97ee934
 +      atomic_set(&bfqq->ref, 0);
 +      bfqq->bfqd = bfqd;
 +
-+      bfq_mark_bfqq_prio_changed(bfqq);
++      if (bic)
++              bfq_set_next_ioprio_data(bfqq, bic);
 +
 +      if (is_sync) {
 +              if (!bfq_class_idle(bfqq))
@@ -3881,8 +3883,8 @@ index 0000000..97ee934
 +              }
 +
 +              if (bfqq != NULL) {
-+                      bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
-+                      bfq_init_prio_data(bfqq, bic);
++                      bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
++                                      is_sync);
 +                      bfq_init_entity(&bfqq->entity, bfqg);
 +                      bfq_log_bfqq(bfqd, bfqq, "allocated");
 +              } else {
@@ -4120,7 +4122,6 @@ index 0000000..97ee934
 +      struct bfq_queue *bfqq = RQ_BFQQ(rq);
 +
 +      assert_spin_locked(bfqd->queue->queue_lock);
-+      bfq_init_prio_data(bfqq, RQ_BIC(rq));
 +
 +      bfq_add_request(rq);
 +
@@ -4257,11 +4258,8 @@ index 0000000..97ee934
 +              return ELV_MQUEUE_MAY;
 +
 +      bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
-+      if (bfqq != NULL) {
-+              bfq_init_prio_data(bfqq, bic);
-+
++      if (bfqq != NULL)
 +              return __bfq_may_queue(bfqq);
-+      }
 +
 +      return ELV_MQUEUE_MAY;
 +}
@@ -4339,7 +4337,7 @@ index 0000000..97ee934
 +
 +      might_sleep_if(gfp_mask & __GFP_WAIT);
 +
-+      bfq_changed_ioprio(bic);
++      bfq_check_ioprio_change(bic);
 +
 +      spin_lock_irqsave(q->queue_lock, flags);
 +
@@ -4543,10 +4541,12 @@ index 0000000..97ee934
 +       * Grab a permanent reference to it, so that the normal code flow
 +       * will not attempt to free it.
 +       */
-+      bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
++      bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
 +      atomic_inc(&bfqd->oom_bfqq.ref);
 +      bfqd->oom_bfqq.entity.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
 +      bfqd->oom_bfqq.entity.new_ioprio_class = IOPRIO_CLASS_BE;
++      bfqd->oom_bfqq.entity.new_weight =
++              bfq_ioprio_to_weight(bfqd->oom_bfqq.entity.new_ioprio);
 +      /*
 +       * Trigger weight initialization, according to ioprio, at the
 +       * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
@@ -4591,7 +4591,6 @@ index 0000000..97ee934
 +
 +      bfqd->bfq_max_budget = bfq_default_max_budget;
 +
-+      bfqd->bfq_quantum = bfq_quantum;
 +      bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
 +      bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
 +      bfqd->bfq_back_max = bfq_back_max;
@@ -4725,7 +4724,6 @@ index 0000000..97ee934
 +              __data = jiffies_to_msecs(__data);                      \
 +      return bfq_var_show(__data, (page));                            \
 +}
-+SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
 +SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
 +SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
 +SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
@@ -4762,7 +4760,6 @@ index 0000000..97ee934
 +              *(__PTR) = __data;                                      \
 +      return ret;                                                     \
 +}
-+STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
 +STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
 +              INT_MAX, 1);
 +STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
@@ -4863,7 +4860,6 @@ index 0000000..97ee934
 +      __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
 +
 +static struct elv_fs_entry bfq_attrs[] = {
-+      BFQ_ATTR(quantum),
 +      BFQ_ATTR(fifo_expire_sync),
 +      BFQ_ATTR(fifo_expire_async),
 +      BFQ_ATTR(back_seek_max),
@@ -4944,7 +4940,7 @@ index 0000000..97ee934
 +      device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
 +
 +      elv_register(&iosched_bfq);
-+      pr_info("BFQ I/O-scheduler version: v7r7");
++      pr_info("BFQ I/O-scheduler: v7r8");
 +
 +      return 0;
 +}
@@ -4962,10 +4958,10 @@ index 0000000..97ee934
 +MODULE_LICENSE("GPL");
 diff --git a/block/bfq-sched.c b/block/bfq-sched.c
 new file mode 100644
-index 0000000..2931563
+index 0000000..c343099
 --- /dev/null
 +++ b/block/bfq-sched.c
-@@ -0,0 +1,1214 @@
+@@ -0,0 +1,1208 @@
 +/*
 + * BFQ: Hierarchical B-WF2Q+ scheduler.
 + *
@@ -5604,13 +5600,7 @@ index 0000000..2931563
 +                      entity->orig_weight = entity->new_weight;
 +                      entity->ioprio =
 +                              bfq_weight_to_ioprio(entity->orig_weight);
-+              } else if (entity->new_ioprio != entity->ioprio) {
-+                      entity->ioprio = entity->new_ioprio;
-+                      entity->orig_weight =
-+                                      bfq_ioprio_to_weight(entity->ioprio);
-+              } else
-+                      entity->new_weight = entity->orig_weight =
-+                              bfq_ioprio_to_weight(entity->ioprio);
++              }
 +
 +              entity->ioprio_class = entity->new_ioprio_class;
 +              entity->ioprio_changed = 0;
@@ -6182,12 +6172,12 @@ index 0000000..2931563
 +}
 diff --git a/block/bfq.h b/block/bfq.h
 new file mode 100644
-index 0000000..649afe9
+index 0000000..3173b35
 --- /dev/null
 +++ b/block/bfq.h
-@@ -0,0 +1,775 @@
+@@ -0,0 +1,771 @@
 +/*
-+ * BFQ-v7r7 for 3.18.0: data structures and common functions prototypes.
++ * BFQ-v7r8 for 3.18.0: data structures and common functions prototypes.
 + *
 + * Based on ideas and code from CFQ:
 + * Copyright (C) 2003 Jens Axboe <ax...@kernel.dk>
@@ -6573,7 +6563,6 @@ index 0000000..649afe9
 + * @group_list: list of all the bfq_groups active on the device.
 + * @active_list: list of all the bfq_queues active on the device.
 + * @idle_list: list of all the bfq_queues idle on the device.
-+ * @bfq_quantum: max number of requests dispatched per dispatch round.
 + * @bfq_fifo_expire: timeout for async/sync requests; when it expires
 + *                   requests are served in fifo order.
 + * @bfq_back_penalty: weight of backward seeks wrt forward ones.
@@ -6681,7 +6670,6 @@ index 0000000..649afe9
 +      struct list_head active_list;
 +      struct list_head idle_list;
 +
-+      unsigned int bfq_quantum;
 +      unsigned int bfq_fifo_expire[2];
 +      unsigned int bfq_back_penalty;
 +      unsigned int bfq_back_max;
@@ -6724,7 +6712,6 @@ index 0000000..649afe9
 +      BFQ_BFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
 +      BFQ_BFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
 +      BFQ_BFQQ_FLAG_idle_window,      /* slice idling enabled */
-+      BFQ_BFQQ_FLAG_prio_changed,     /* task priority has changed */
 +      BFQ_BFQQ_FLAG_sync,             /* synchronous queue */
 +      BFQ_BFQQ_FLAG_budget_new,       /* no completion with this budget */
 +      BFQ_BFQQ_FLAG_IO_bound,         /*
@@ -6767,7 +6754,6 @@ index 0000000..649afe9
 +BFQ_BFQQ_FNS(must_alloc);
 +BFQ_BFQQ_FNS(fifo_expire);
 +BFQ_BFQQ_FNS(idle_window);
-+BFQ_BFQQ_FNS(prio_changed);
 +BFQ_BFQQ_FNS(sync);
 +BFQ_BFQQ_FNS(budget_new);
 +BFQ_BFQQ_FNS(IO_bound);
@@ -6949,7 +6935,7 @@ index 0000000..649afe9
 +      spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
 +}
 +
-+static void bfq_changed_ioprio(struct bfq_io_cq *bic);
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic);
 +static void bfq_put_queue(struct bfq_queue *bfqq);
 +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
 +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
@@ -6962,5 +6948,5 @@ index 0000000..649afe9
 +
 +#endif /* _BFQ_H */
 -- 
-2.1.3
+2.1.4
 

diff --git a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.18.0.patch b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-3.18.patch
similarity index 94%
rename from 5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.18.0.patch
rename to 5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-3.18.patch
index 1292c2b..121fb5d 100644
--- a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.18.0.patch
+++ b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-3.18.patch
@@ -1,7 +1,7 @@
-From b941be79912544b39141eff175bc1964568a5b1b Mon Sep 17 00:00:00 2001
+From 957b7799fa08de8a84561c7c3299c05b695bb19d Mon Sep 17 00:00:00 2001
 From: Mauro Andreolini <mauro.andreol...@unimore.it>
-Date: Thu, 18 Dec 2014 21:32:08 +0100
-Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r7 for
+Date: Fri, 5 Jun 2015 17:45:40 +0200
+Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r8 for
  3.18.0
 
 A set of processes may happen  to  perform interleaved reads, i.e.,requests
@@ -34,16 +34,16 @@ Signed-off-by: Mauro Andreolini <mauro.andreol...@unimore.it>
 Signed-off-by: Arianna Avanzini <avanzini.aria...@gmail.com>
 Signed-off-by: Paolo Valente <paolo.vale...@unimore.it>
 ---
- block/bfq-iosched.c | 751 +++++++++++++++++++++++++++++++++++++---------------
+ block/bfq-iosched.c | 750 +++++++++++++++++++++++++++++++++++++---------------
  block/bfq-sched.c   |  28 --
  block/bfq.h         |  54 +++-
- 3 files changed, 581 insertions(+), 252 deletions(-)
+ 3 files changed, 580 insertions(+), 252 deletions(-)
 
 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
-index 97ee934..328f33c 100644
+index 773b2ee..71b51c1 100644
 --- a/block/bfq-iosched.c
 +++ b/block/bfq-iosched.c
-@@ -571,6 +571,57 @@ static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+@@ -573,6 +573,57 @@ static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
        return dur;
  }
  
@@ -101,7 +101,7 @@ index 97ee934..328f33c 100644
  /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
  static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
                                        struct bfq_queue *bfqq)
-@@ -815,7 +866,7 @@ static void bfq_add_request(struct request *rq)
+@@ -817,7 +868,7 @@ static void bfq_add_request(struct request *rq)
                bfq_rq_pos_tree_add(bfqd, bfqq);
  
        if (!bfq_bfqq_busy(bfqq)) {
@@ -110,7 +110,7 @@ index 97ee934..328f33c 100644
                     idle_for_long_time = time_is_before_jiffies(
                                                bfqq->budget_timeout +
                                                bfqd->bfq_wr_min_idle_time);
-@@ -839,11 +890,12 @@ static void bfq_add_request(struct request *rq)
+@@ -841,11 +892,12 @@ static void bfq_add_request(struct request *rq)
                                bfqd->last_ins_in_burst = jiffies;
                }
  
@@ -126,7 +126,7 @@ index 97ee934..328f33c 100644
                entity->budget = max_t(unsigned long, bfqq->max_budget,
                                       bfq_serv_to_charge(next_rq, bfqq));
  
-@@ -862,11 +914,20 @@ static void bfq_add_request(struct request *rq)
+@@ -864,11 +916,20 @@ static void bfq_add_request(struct request *rq)
                if (!bfqd->low_latency)
                        goto add_bfqq_busy;
  
@@ -150,7 +150,7 @@ index 97ee934..328f33c 100644
                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                        if (interactive)
                                bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
-@@ -880,7 +941,7 @@ static void bfq_add_request(struct request *rq)
+@@ -882,7 +943,7 @@ static void bfq_add_request(struct request *rq)
                } else if (old_wr_coeff > 1) {
                        if (interactive)
                                bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
@@ -159,7 +159,7 @@ index 97ee934..328f33c 100644
                                 (bfqq->wr_cur_max_time ==
                                  bfqd->bfq_wr_rt_max_time &&
                                  !soft_rt)) {
-@@ -899,18 +960,18 @@ static void bfq_add_request(struct request *rq)
+@@ -901,18 +962,18 @@ static void bfq_add_request(struct request *rq)
                                /*
                                 *
                                 * The remaining weight-raising time is lower
@@ -190,7 +190,7 @@ index 97ee934..328f33c 100644
                                 *
                                 * In addition, the application is now meeting
                                 * the requirements for being deemed soft rt.
-@@ -945,6 +1006,7 @@ static void bfq_add_request(struct request *rq)
+@@ -947,6 +1008,7 @@ static void bfq_add_request(struct request *rq)
                                        bfqd->bfq_wr_rt_max_time;
                        }
                }
@@ -198,7 +198,7 @@ index 97ee934..328f33c 100644
                if (old_wr_coeff != bfqq->wr_coeff)
                        entity->ioprio_changed = 1;
  add_bfqq_busy:
-@@ -1156,90 +1218,35 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+@@ -1167,90 +1229,35 @@ static void bfq_end_wr(struct bfq_data *bfqd)
        spin_unlock_irq(bfqd->queue->queue_lock);
  }
  
@@ -303,7 +303,7 @@ index 97ee934..328f33c 100644
  
        if (RB_EMPTY_ROOT(root))
                return NULL;
-@@ -1258,7 +1265,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+@@ -1269,7 +1276,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
         * next_request position).
         */
        __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
@@ -312,7 +312,7 @@ index 97ee934..328f33c 100644
                return __bfqq;
  
        if (blk_rq_pos(__bfqq->next_rq) < sector)
-@@ -1269,7 +1276,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+@@ -1280,7 +1287,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
                return NULL;
  
        __bfqq = rb_entry(node, struct bfq_queue, pos_node);
@@ -321,7 +321,7 @@ index 97ee934..328f33c 100644
                return __bfqq;
  
        return NULL;
-@@ -1278,14 +1285,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+@@ -1289,14 +1296,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
  /*
   * bfqd - obvious
   * cur_bfqq - passed in so that we don't decide that the current queue
@@ -340,7 +340,7 @@ index 97ee934..328f33c 100644
  {
        struct bfq_queue *bfqq;
  
-@@ -1305,7 +1310,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+@@ -1316,7 +1321,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
         * working closely on the same area of the disk. In that case,
         * we can group them together and don't waste time idling.
         */
@@ -349,7 +349,7 @@ index 97ee934..328f33c 100644
        if (bfqq == NULL || bfqq == cur_bfqq)
                return NULL;
  
-@@ -1332,6 +1337,315 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+@@ -1343,6 +1348,315 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
        return bfqq;
  }
  
@@ -665,7 +665,7 @@ index 97ee934..328f33c 100644
  /*
   * If enough samples have been computed, return the current max budget
   * stored in bfqd, which is dynamically updated according to the
-@@ -1475,61 +1789,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
+@@ -1488,61 +1802,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
        return rq;
  }
  
@@ -727,7 +727,7 @@ index 97ee934..328f33c 100644
  static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
  {
        struct bfq_entity *entity = &bfqq->entity;
-@@ -2263,7 +2522,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
+@@ -2269,7 +2528,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
   */
  static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
  {
@@ -736,7 +736,7 @@ index 97ee934..328f33c 100644
        struct request *next_rq;
        enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
  
-@@ -2273,17 +2532,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+@@ -2279,17 +2538,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
  
        bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
  
@@ -754,7 +754,7 @@ index 97ee934..328f33c 100644
        if (bfq_may_expire_for_budg_timeout(bfqq) &&
            !timer_pending(&bfqd->idle_slice_timer) &&
            !bfq_bfqq_must_idle(bfqq))
-@@ -2322,10 +2570,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+@@ -2328,10 +2576,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
                                bfq_clear_bfqq_wait_request(bfqq);
                                del_timer(&bfqd->idle_slice_timer);
                        }
@@ -766,9 +766,9 @@ index 97ee934..328f33c 100644
                }
        }
  
-@@ -2334,40 +2579,30 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
-        * in flight (possibly waiting for a completion) or is idling for a
-        * new request, then keep it.
+@@ -2340,40 +2585,30 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+        * for a new request, or has requests waiting for a completion and
+        * may idle after their completion, then keep it anyway.
         */
 -      if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
 -          (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
@@ -814,7 +814,7 @@ index 97ee934..328f33c 100644
                        jiffies_to_msecs(bfqq->wr_cur_max_time),
                        bfqq->wr_coeff,
                        bfqq->entity.weight, bfqq->entity.orig_weight);
-@@ -2376,12 +2611,16 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
+@@ -2382,12 +2617,16 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
                       entity->orig_weight * bfqq->wr_coeff);
                if (entity->ioprio_changed)
                        bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
@@ -832,7 +832,7 @@ index 97ee934..328f33c 100644
                    time_is_before_jiffies(bfqq->last_wr_start_finish +
                                           bfqq->wr_cur_max_time)) {
                        bfqq->last_wr_start_finish = jiffies;
-@@ -2390,11 +2629,13 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
+@@ -2396,11 +2635,13 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
                                     bfqq->last_wr_start_finish,
                                     jiffies_to_msecs(bfqq->wr_cur_max_time));
                        bfq_bfqq_end_wr(bfqq);
@@ -849,7 +849,7 @@ index 97ee934..328f33c 100644
  }
  
  /*
-@@ -2642,6 +2883,25 @@ static inline void bfq_init_icq(struct io_cq *icq)
+@@ -2647,6 +2888,25 @@ static inline void bfq_init_icq(struct io_cq *icq)
        struct bfq_io_cq *bic = icq_to_bic(icq);
  
        bic->ttime.last_end_request = jiffies;
@@ -875,7 +875,7 @@ index 97ee934..328f33c 100644
  }
  
  static void bfq_exit_icq(struct io_cq *icq)
-@@ -2655,6 +2915,13 @@ static void bfq_exit_icq(struct io_cq *icq)
+@@ -2660,6 +2920,13 @@ static void bfq_exit_icq(struct io_cq *icq)
        }
  
        if (bic->bfqq[BLK_RW_SYNC]) {
@@ -889,7 +889,7 @@ index 97ee934..328f33c 100644
                bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
                bic->bfqq[BLK_RW_SYNC] = NULL;
        }
-@@ -2950,6 +3217,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
+@@ -2952,6 +3219,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
        if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
                return;
  
@@ -900,7 +900,7 @@ index 97ee934..328f33c 100644
        enable_idle = bfq_bfqq_idle_window(bfqq);
  
        if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
-@@ -2997,6 +3268,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+@@ -2999,6 +3270,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
        if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
            !BFQQ_SEEKY(bfqq))
                bfq_update_idle_window(bfqd, bfqq, bic);
@@ -908,7 +908,7 @@ index 97ee934..328f33c 100644
  
        bfq_log_bfqq(bfqd, bfqq,
                     "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
-@@ -3057,13 +3329,49 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+@@ -3059,12 +3331,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  static void bfq_insert_request(struct request_queue *q, struct request *rq)
  {
        struct bfq_data *bfqd = q->elevator->elevator_data;
@@ -916,7 +916,7 @@ index 97ee934..328f33c 100644
 +      struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
  
        assert_spin_locked(bfqd->queue->queue_lock);
-+
+ 
 +      /*
 +       * An unplug may trigger a requeue of a request from the device
 +       * driver: make sure we are in process context while trying to
@@ -944,8 +944,6 @@ index 97ee934..328f33c 100644
 +                      bfq_bfqq_increase_failed_cooperations(bfqq);
 +      }
 +
-       bfq_init_prio_data(bfqq, RQ_BIC(rq));
- 
        bfq_add_request(rq);
  
 +      /*
@@ -959,7 +957,7 @@ index 97ee934..328f33c 100644
        rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
        list_add_tail(&rq->queuelist, &bfqq->fifo);
  
-@@ -3228,18 +3536,6 @@ static void bfq_put_request(struct request *rq)
+@@ -3226,18 +3533,6 @@ static void bfq_put_request(struct request *rq)
        }
  }
  
@@ -978,7 +976,7 @@ index 97ee934..328f33c 100644
  /*
   * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
   * was the last process referring to said bfqq.
-@@ -3248,6 +3544,9 @@ static struct bfq_queue *
+@@ -3246,6 +3541,9 @@ static struct bfq_queue *
  bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
  {
        bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
@@ -988,7 +986,7 @@ index 97ee934..328f33c 100644
        if (bfqq_process_refs(bfqq) == 1) {
                bfqq->pid = current->pid;
                bfq_clear_bfqq_coop(bfqq);
-@@ -3276,6 +3575,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+@@ -3274,6 +3572,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
        struct bfq_queue *bfqq;
        struct bfq_group *bfqg;
        unsigned long flags;
@@ -996,7 +994,7 @@ index 97ee934..328f33c 100644
  
        might_sleep_if(gfp_mask & __GFP_WAIT);
  
-@@ -3293,25 +3593,26 @@ new_queue:
+@@ -3291,25 +3590,26 @@ new_queue:
        if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
                bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
                bic_set_bfqq(bic, bfqq, is_sync);
@@ -1035,7 +1033,7 @@ index 97ee934..328f33c 100644
        }
  
        bfqq->allocated[rw]++;
-@@ -3322,6 +3623,26 @@ new_queue:
+@@ -3320,6 +3620,26 @@ new_queue:
        rq->elv.priv[0] = bic;
        rq->elv.priv[1] = bfqq;
  
@@ -1063,10 +1061,10 @@ index 97ee934..328f33c 100644
  
        return 0;
 diff --git a/block/bfq-sched.c b/block/bfq-sched.c
-index 2931563..6764a7e 100644
+index c343099..d0890c6 100644
 --- a/block/bfq-sched.c
 +++ b/block/bfq-sched.c
-@@ -1091,34 +1091,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+@@ -1085,34 +1085,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
        return bfqq;
  }
  
@@ -1102,7 +1100,7 @@ index 2931563..6764a7e 100644
  {
        if (bfqd->in_service_bic != NULL) {
 diff --git a/block/bfq.h b/block/bfq.h
-index 649afe9..0767d75 100644
+index 3173b35..629c413 100644
 --- a/block/bfq.h
 +++ b/block/bfq.h
 @@ -218,18 +218,21 @@ struct bfq_group;
@@ -1184,8 +1182,8 @@ index 649afe9..0767d75 100644
  };
  
  enum bfq_device_speed {
-@@ -539,7 +573,7 @@ enum bfqq_state_flags {
-       BFQ_BFQQ_FLAG_prio_changed,     /* task priority has changed */
+@@ -536,7 +570,7 @@ enum bfqq_state_flags {
+       BFQ_BFQQ_FLAG_idle_window,      /* slice idling enabled */
        BFQ_BFQQ_FLAG_sync,             /* synchronous queue */
        BFQ_BFQQ_FLAG_budget_new,       /* no completion with this budget */
 -      BFQ_BFQQ_FLAG_IO_bound,         /*
@@ -1193,7 +1191,7 @@ index 649afe9..0767d75 100644
                                         * bfqq has timed-out at least once
                                         * having consumed at most 2/10 of
                                         * its budget
-@@ -552,12 +586,13 @@ enum bfqq_state_flags {
+@@ -549,12 +583,13 @@ enum bfqq_state_flags {
                                         * bfqq has proved to be slow and
                                         * seeky until budget timeout
                                         */
@@ -1209,7 +1207,7 @@ index 649afe9..0767d75 100644
  };
  
  #define BFQ_BFQQ_FNS(name)                                            \
-@@ -587,6 +622,7 @@ BFQ_BFQQ_FNS(in_large_burst);
+@@ -583,6 +618,7 @@ BFQ_BFQQ_FNS(in_large_burst);
  BFQ_BFQQ_FNS(constantly_seeky);
  BFQ_BFQQ_FNS(coop);
  BFQ_BFQQ_FNS(split_coop);
@@ -1218,5 +1216,5 @@ index 649afe9..0767d75 100644
  #undef BFQ_BFQQ_FNS
  
 -- 
-2.1.3
+2.1.4
 
