Re: [PATCH 05/14] blk-mq: allow software queue to map to multiple hardware queues

2018-10-29 Thread Hannes Reinecke

On 10/25/18 11:16 PM, Jens Axboe wrote:

> The mapping used to be dependent on just the CPU location, but
> now it's a tuple of { type, cpu } instead. This is a prep patch
> for allowing a single software queue to map to multiple hardware
> queues. No functional changes in this patch.
> 
> Signed-off-by: Jens Axboe 
> ---
>   block/blk-mq-sched.c   |  2 +-
>   block/blk-mq.c         | 18 ++++++++++++------
>   block/blk-mq.h         |  2 +-
>   block/kyber-iosched.c  |  6 +++---
>   include/linux/blk-mq.h |  3 ++-
>   5 files changed, 19 insertions(+), 12 deletions(-)


Reviewed-by: Hannes Reinecke 

Cheers,

Hannes


[PATCH 05/14] blk-mq: allow software queue to map to multiple hardware queues

2018-10-25 Thread Jens Axboe
The mapping used to be dependent on just the CPU location, but
now it's a tuple of { type, cpu } instead. This is a prep patch
for allowing a single software queue to map to multiple hardware
queues. No functional changes in this patch.

Signed-off-by: Jens Axboe 
---
 block/blk-mq-sched.c   |  2 +-
 block/blk-mq.c         | 18 ++++++++++++------
 block/blk-mq.h         |  2 +-
 block/kyber-iosched.c  |  6 +++---
 include/linux/blk-mq.h |  3 ++-
 5 files changed, 19 insertions(+), 12 deletions(-)
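
Not part of the patch, but as a standalone illustration of the { type, cpu }
tuple, the sketch below models the new per-type index array in plain C.
HCTX_MAX_TYPES is taken from the blk-mq.h hunk below; the hctx_type values
and the stripped-down structs are invented for the example and are not the
kernel's actual definitions.

	#include <stdio.h>

	/* Illustrative only; the real type values come later in this series. */
	enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

	struct hw_ctx {
		enum hctx_type type;	/* which mapping type this hw queue serves */
		unsigned int nr_ctx;	/* how many sw queues map to it */
	};

	struct sw_ctx {
		unsigned int cpu;
		/* before: a single "unsigned int index_hw" */
		unsigned short index_hw[HCTX_MAX_TYPES]; /* after: one per type */
	};

	/* Mirrors the per-CPU assignment in blk_mq_map_swqueue() below. */
	static void map_swqueue(struct sw_ctx *ctx, struct hw_ctx *hctx)
	{
		ctx->index_hw[hctx->type] = hctx->nr_ctx++;
	}

	int main(void)
	{
		struct hw_ctx def = { .type = HCTX_TYPE_DEFAULT };
		struct hw_ctx poll = { .type = HCTX_TYPE_POLL };
		struct sw_ctx ctx = { .cpu = 0 };

		/* The same sw queue now carries an index into both hw queues. */
		map_swqueue(&ctx, &def);
		map_swqueue(&ctx, &poll);

		printf("cpu %u: default idx %hu, poll idx %hu\n", ctx.cpu,
		       ctx.index_hw[HCTX_TYPE_DEFAULT],
		       ctx.index_hw[HCTX_TYPE_POLL]);
		return 0;
	}

The point is just that index_hw becomes an array indexed by hctx->type, so
one ctx can appear in the ctxs[] of several hardware queues.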

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 8125e9393ec2..d232ecf3290c 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -110,7 +110,7 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
  struct blk_mq_ctx *ctx)
 {
-   unsigned idx = ctx->index_hw;
+   unsigned short idx = ctx->index_hw[hctx->type];
 
if (++idx == hctx->nr_ctx)
idx = 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e6ea7da99125..fab84c6bda18 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -75,14 +75,18 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 struct blk_mq_ctx *ctx)
 {
-   if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
-           sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
+   const int bit = ctx->index_hw[hctx->type];
+
+   if (!sbitmap_test_bit(&hctx->ctx_map, bit))
+           sbitmap_set_bit(&hctx->ctx_map, bit);
 }
 
 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
  struct blk_mq_ctx *ctx)
 {
-   sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
+   const int bit = ctx->index_hw[hctx->type];
+
+   sbitmap_clear_bit(&hctx->ctx_map, bit);
 }
 
 struct mq_inflight {
@@ -954,7 +958,7 @@ static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start)
 {
-   unsigned off = start ? start->index_hw : 0;
+   unsigned off = start ? start->index_hw[hctx->type] : 0;
struct dispatch_rq_data data = {
.hctx = hctx,
.rq   = NULL,
@@ -2342,10 +2346,12 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = blk_mq_map_queue_type(q, 0, i);
-
+   hctx->type = 0;
cpumask_set_cpu(i, hctx->cpumask);
-   ctx->index_hw = hctx->nr_ctx;
+   ctx->index_hw[hctx->type] = hctx->nr_ctx;
hctx->ctxs[hctx->nr_ctx++] = ctx;
+   /* wrap */
+   BUG_ON(!hctx->nr_ctx);
}
 
	mutex_unlock(&q->sysfs_lock);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 55428b92c019..7b5a790acdbf 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -17,7 +17,7 @@ struct blk_mq_ctx {
	}  ____cacheline_aligned_in_smp;
 
unsigned intcpu;
-   unsigned intindex_hw;
+   unsigned short  index_hw[HCTX_MAX_TYPES];
 
/* incremented at dispatch time */
unsigned long   rq_dispatched[2];
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 728757a34fa0..b824a639d5d4 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -576,7 +576,7 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 {
struct kyber_hctx_data *khd = hctx->sched_data;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
-   struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
+   struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
bool merged;
@@ -602,7 +602,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 
list_for_each_entry_safe(rq, next, rq_list, queuelist) {
unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
-   struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw];
+   struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
	struct list_head *head = &kcq->rq_list[sched_domain];
 
	spin_lock(&kcq->lock);
@@ -611,7 +611,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
else
	list_move_tail(&rq->queuelist, head);
	sbitmap_set_bit(&khd->kcq_map[sched_domain],
-   rq->mq_ctx->index_hw);
+   rq->mq_ctx->index_hw[hctx->type]);
blk_mq_sched_request_inserted(rq);
	spin_unlock(&kcq->lock);
}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h