This patch sets map->nr_queues to zero explicitly if there are zero
queues for a given queue type: the shared map now reuses the default
type's mq_map table directly instead of faking a non-zero nr_queues,
and blk_mq_map_swqueue() skips zero-queue maps, which makes it more
robust when dealing with shared mappings.
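
To illustrate the idea outside the kernel, here is a minimal
user-space C sketch (the struct and names are simplified stand-ins for
the real blk-mq types, not the actual API): a map whose nr_queues is
zero shares the default map's CPU table, and the mapping loop simply
skips it.

  #include <stdio.h>
  #include <string.h>

  #define NR_CPUS 4
  #define NR_MAPS 3  /* e.g. DEFAULT, READ, POLL */

  /* simplified stand-in for struct blk_mq_queue_map */
  struct queue_map {
          unsigned int nr_queues;        /* 0: shares the default map */
          unsigned int mq_map[NR_CPUS];  /* per-CPU hw queue index */
  };

  int main(void)
  {
          struct queue_map maps[NR_MAPS] = {
                  { .nr_queues = 2, .mq_map = { 0, 0, 1, 1 } }, /* default */
                  { .nr_queues = 0 },                           /* shared */
                  { .nr_queues = 1, .mq_map = { 0, 0, 0, 0 } }, /* poll */
          };
          int cpu, j;

          /* shared type: keep nr_queues at zero but reuse the default
           * table, like the memcpy() added in nvme_pci_map_queues() */
          memcpy(maps[1].mq_map, maps[0].mq_map, sizeof(maps[0].mq_map));

          /* mapping pass: skip zero-queue maps, like the check added
           * in blk_mq_map_swqueue() */
          for (cpu = 0; cpu < NR_CPUS; cpu++)
                  for (j = 0; j < NR_MAPS; j++) {
                          if (!maps[j].nr_queues)
                                  continue;
                          printf("cpu %d: map %d -> hw queue %u\n",
                                 cpu, j, maps[j].mq_map[cpu]);
                  }
          return 0;
  }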

Cc: Jeff Moyer <[email protected]>
Cc: Mike Snitzer <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
 block/blk-mq.c          |  3 +++
 drivers/nvme/host/pci.c | 37 ++++++++++++++++++++-----------------
 2 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index a4a0895dae65..a737d912c46b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2435,6 +2435,9 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                for (j = 0; j < set->nr_maps; j++) {
                        hctx = blk_mq_map_queue_type(q, j, i);
 
+                       if (!set->map[j].nr_queues)
+                               continue;
+
                        /*
                         * If the CPU is already set in the mask, then we've
                         * mapped this one already. This can happen if
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 95bd68be2078..43074c54279e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -492,29 +492,32 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
        offset = queue_irq_offset(dev);
        for (i = 0, qoff = 0; i < set->nr_maps; i++) {
                struct blk_mq_queue_map *map = &set->map[i];
+               unsigned nr_queues;
 
-               map->nr_queues = dev->io_queues[i];
-               if (!map->nr_queues) {
+               nr_queues = map->nr_queues = dev->io_queues[i];
+               if (!nr_queues) {
                        BUG_ON(i == HCTX_TYPE_DEFAULT);
 
-                       /* shared set, resuse read set parameters */
-                       map->nr_queues = dev->io_queues[HCTX_TYPE_DEFAULT];
+                       /* shared set, reuse default set parameters and table */
+                       nr_queues = dev->io_queues[HCTX_TYPE_DEFAULT];
                        qoff = 0;
                        offset = queue_irq_offset(dev);
-               }
 
-               /*
-                * The poll queue(s) doesn't have an IRQ (and hence IRQ
-                * affinity), so use the regular blk-mq cpu mapping if
-                * poll queue(s) don't share mapping with TYPE_DEFAULT.
-                */
-               map->queue_offset = qoff;
-               if (i != HCTX_TYPE_POLL || !qoff)
-                       blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
-               else
-                       blk_mq_map_queues(map);
-               qoff += map->nr_queues;
-               offset += map->nr_queues;
+                       memcpy(map->mq_map, set->map[HCTX_TYPE_DEFAULT].mq_map,
+                                  nr_cpu_ids * sizeof(map->mq_map[0]));
+               } else {
+                       /*
+                        * The poll queue(s) doesn't have an IRQ (and hence IRQ
+                        * affinity), so use the regular blk-mq cpu mapping.
+                        */
+                       map->queue_offset = qoff;
+                       if (i != HCTX_TYPE_POLL)
+                               blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+                       else
+                               blk_mq_map_queues(map);
+               }
+               qoff += nr_queues;
+               offset += nr_queues;
        }
 
        return 0;
-- 
2.9.5
