Re: [PATCH 1/3] nvme: Pass the queue to SQ_SIZE/CQ_SIZE macros

2019-07-16 Thread Christoph Hellwig
On Tue, Jul 16, 2019 at 10:46:47AM +1000, Benjamin Herrenschmidt wrote:
> This will make it easier to handle variable queue entry sizes
> later. No functional change.

Looks good to me:

Reviewed-by: Christoph Hellwig 


Re: [PATCH 1/3] nvme: Pass the queue to SQ_SIZE/CQ_SIZE macros

2019-07-15 Thread Benjamin Herrenschmidt
On Tue, 2019-07-16 at 10:46 +1000, Benjamin Herrenschmidt wrote:
> # Conflicts:
> #   drivers/nvme/host/pci.c
> ---

Oops :-) You can strip that, or should I resend? I'll wait for
comments/reviews regardless.

Cheers,
Ben.



[PATCH 1/3] nvme: Pass the queue to SQ_SIZE/CQ_SIZE macros

2019-07-15 Thread Benjamin Herrenschmidt
This will make it easier to handle variable queue entry sizes
later. No functional change.

Signed-off-by: Benjamin Herrenschmidt 

# Conflicts:
#   drivers/nvme/host/pci.c
---
 drivers/nvme/host/pci.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
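
For context (a note below the fold, not for the commit message): the
point of passing the queue instead of the depth is that a follow-up can
derive the size from per-queue state. A rough sketch of where this is
headed (the "sqes" field is only illustrative of the later patches in
this series, not something this patch adds):

	/* sketch: give each queue a log2 of its SQ entry size */
	struct nvme_queue {
		u16 q_depth;
		u8  sqes;	/* log2(SQ entry size in bytes) */
	};

	#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)

CQ_SIZE() can keep using sizeof(struct nvme_completion), since the
completion entry size does not vary.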

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index dd10cf78f2d3..8f006638452b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,8 +28,8 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_command))
+#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
@@ -1344,16 +1344,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 	if (!nvmeq->sq_cmds)
 		return;
 
 	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
 		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
-				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 	} else {
-		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
 				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	}
 }
@@ -1433,12 +1433,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 }
 
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
-				int qid, int depth)
+				int qid)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
 		if (nvmeq->sq_cmds) {
 			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
 							nvmeq->sq_cmds);
@@ -1447,11 +1447,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				return 0;
 			}
 
-			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 		}
 	}
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
 				&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		return -ENOMEM;
@@ -1465,12 +1465,13 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
-	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+	nvmeq->q_depth = depth;
+	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
 					 &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
 		goto free_cqdma;
 
 	nvmeq->dev = dev;
@@ -1479,15 +1480,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	dev->ctrl.queue_count++;
 
 	return 0;
 
  free_cqdma:
-	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
-			nvmeq->cq_dma_addr);
+	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+			  nvmeq->cq_dma_addr);
 free_nvmeq:
 	return -ENOMEM;
 }
@@ -1515,7 +1515,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
 	nvme_dbbuf_init(dev, nvmeq, qid);
 	dev->online_queues++;
 	wmb(); /* ensure the first interrupt sees the initialization */
-- 
2.17.1