Re: [PATCH v6 09/42] nvme: add max_ioqpairs device parameter

2020-03-31 Thread Maxim Levitsky
On Tue, 2020-03-31 at 07:40 +0200, Klaus Birkelund Jensen wrote:
> On Mar 25 12:39, Maxim Levitsky wrote:
> > On Mon, 2020-03-16 at 07:28 -0700, Klaus Jensen wrote:
> > > From: Klaus Jensen 
> > > 
> > > The num_queues device parameter has a slightly confusing meaning because
> > > it accounts for the admin queue pair which is not really optional.
> > > Secondly, it is really a maximum value of queues allowed.
> > > 
> > > Add a new max_ioqpairs parameter that only accounts for I/O queue pairs,
> > > but keep num_queues for compatibility.
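
(To make the compatibility mapping concrete for readers following along: the
old parameter counted the admin queue pair as well, so, if I read the
num_queues -> max_ioqpairs conversion in nvme_realize correctly, an existing
command line such as

    -device nvme,drive=nvme0,serial=deadbeef,num_queues=65

should behave the same as

    -device nvme,drive=nvme0,serial=deadbeef,max_ioqpairs=64

with this patch applied; the drive and serial values here are just placeholders.)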
> > > 
> > > Signed-off-by: Klaus Jensen 
> > > ---
> > >  hw/block/nvme.c | 45 ++---
> > >  hw/block/nvme.h |  4 +++-
> > >  2 files changed, 29 insertions(+), 20 deletions(-)
> > > 
> > > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > > index 7cf7cf55143e..7dfd8a1a392d 100644
> > > --- a/hw/block/nvme.c
> > > +++ b/hw/block/nvme.c
> > > @@ -1332,9 +1333,15 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
> > > **errp)
> > >  int64_t bs_size;
> > >  uint8_t *pci_conf;
> > >  
> > > -if (!n->params.num_queues) {
> > > -error_setg(errp, "num_queues can't be zero");
> > > -return;
> > > +if (n->params.num_queues) {
> > > +warn_report("nvme: num_queues is deprecated; please use 
> > > max_ioqpairs "
> > > +"instead");
> > > +
> > > +n->params.max_ioqpairs = n->params.num_queues - 1;
> > > +}
> > > +
> > > +if (!n->params.max_ioqpairs) {
> > > +error_setg(errp, "max_ioqpairs can't be less than 1");
> > >  }
> > 
> > This is not even a nitpick, just an idea.
> > 
> > It might be worth it to allow max_ioqpairs=0 to simulate a 'broken'
> > nvme controller. I know that the kernel has special handling for such
> > controllers, which includes only creating the control character device
> > (/dev/nvme*) through which the user can submit commands to try to 'fix'
> > the controller (by re-uploading firmware, or something like that).
> > 
> > 
> 
> Not sure about the implications of this, so I'll leave that on the TODO
> :) But a controller with no I/O queues is an "Administrative Controller"
> and perfectly legal in NVMe v1.4 AFAIK.
That's what I was thinking as well. Keeping this on a TODO list is perfectly fine.
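
(Purely to illustrate what that TODO might eventually look like -- this is a
hypothetical sketch, not something I'm asking for in this series, and the
admin_only property name is made up for the example -- the realize check could
be relaxed along these lines:

    /* Hypothetical sketch: the admin_only property does not exist in the
     * current code; it stands in for an explicit opt-in to an
     * Administrative Controller with no I/O queues. */
    if (n->params.max_ioqpairs == 0 && !n->params.admin_only) {
        error_setg(errp, "max_ioqpairs must be at least 1 unless admin_only is set");
        return;
    }

i.e. an explicit opt-in rather than silently accepting max_ioqpairs=0.)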

> 
> > >  
> > >  if (!n->conf.blk) {
> > > @@ -1365,19 +1372,19 @@ static void nvme_realize(PCIDevice *pci_dev, 
> > > Error **errp)
> > >  pcie_endpoint_cap_init(pci_dev, 0x80);
> > >  
> > >  n->num_namespaces = 1;
> > > -n->reg_size = pow2ceil(0x1004 + 2 * (n->params.num_queues + 1) * 4);
> > > +n->reg_size = pow2ceil(0x1008 + 2 * (n->params.max_ioqpairs) * 4);
> > 
> > I hate to say it, but it looks like this thing (which I mentioned to you
> > in V5) was a pre-existing bug, which is indeed fixed now.
> > In theory such fixes should go into separate patches, but in this case I
> > guess it would be too much to ask.
> > Maybe mention this in the commit message instead, so that this fix doesn't
> > stay hidden like that?
> > 
> > 
> 
> I'm convinced now. I have added a preparatory bugfix patch before this
> patch.
Thanks a lot!
Sorry for not noticing it before.
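
(For the record, the doorbell arithmetic as I understand it, assuming the
default 4-byte doorbell stride (CAP.DSTRD=0): the doorbell registers start at
offset 0x1000, and each queue pair needs a submission queue tail doorbell and
a completion queue head doorbell, 4 bytes each. With one admin pair plus
max_ioqpairs I/O pairs the register file therefore ends at

    0x1000 + 2 * (max_ioqpairs + 1) * 4
  = 0x1008 + 2 * max_ioqpairs * 4

which is exactly what the new expression computes, while the old 0x1004-based
expression did not match this layout.)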

> 
> > 
> > Reviewed-by: Maxim Levitsky 
> > 
> > Best regards,
> > Maxim Levitsky
> > 

Best regards,
Maxim Levitsky
> 
> 

Re: [PATCH v6 09/42] nvme: add max_ioqpairs device parameter

2020-03-30 Thread Klaus Birkelund Jensen
On Mar 25 12:39, Maxim Levitsky wrote:
> On Mon, 2020-03-16 at 07:28 -0700, Klaus Jensen wrote:
> > From: Klaus Jensen 
> > 
> > The num_queues device parameter has a slightly confusing meaning because
> > it accounts for the admin queue pair which is not really optional.
> > Secondly, it is really a maximum value of queues allowed.
> > 
> > Add a new max_ioqpairs parameter that only accounts for I/O queue pairs,
> > but keep num_queues for compatibility.
> > 
> > Signed-off-by: Klaus Jensen 
> > ---
> >  hw/block/nvme.c | 45 ++---
> >  hw/block/nvme.h |  4 +++-
> >  2 files changed, 29 insertions(+), 20 deletions(-)
> > 
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index 7cf7cf55143e..7dfd8a1a392d 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -1332,9 +1333,15 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
> > **errp)
> >  int64_t bs_size;
> >  uint8_t *pci_conf;
> >  
> > -if (!n->params.num_queues) {
> > -error_setg(errp, "num_queues can't be zero");
> > -return;
> > +if (n->params.num_queues) {
> > +warn_report("nvme: num_queues is deprecated; please use 
> > max_ioqpairs "
> > +"instead");
> > +
> > +n->params.max_ioqpairs = n->params.num_queues - 1;
> > +}
> > +
> > +if (!n->params.max_ioqpairs) {
> > +error_setg(errp, "max_ioqpairs can't be less than 1");
> >  }
> This is not even a nitpick, just an idea.
> 
> It might be worth it to allow max_ioqpairs=0 to simulate a 'broken'
> nvme controller. I know that the kernel has special handling for such
> controllers, which includes only creating the control character device
> (/dev/nvme*) through which the user can submit commands to try to 'fix'
> the controller (by re-uploading firmware, or something like that).
> 
> 

Not sure about the implications of this, so I'll leave that on the TODO
:) But a controller with no I/O queues is an "Administrative Controller"
and perfectly legal in NVMe v1.4 AFAIK.

> >  
> >  if (!n->conf.blk) {
> > @@ -1365,19 +1372,19 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
> > **errp)
> >  pcie_endpoint_cap_init(pci_dev, 0x80);
> >  
> >  n->num_namespaces = 1;
> > -n->reg_size = pow2ceil(0x1004 + 2 * (n->params.num_queues + 1) * 4);
> > +n->reg_size = pow2ceil(0x1008 + 2 * (n->params.max_ioqpairs) * 4);
> 
> I hate to say it, but it looks like this thing (which I mentioned to you
> in V5) was a pre-existing bug, which is indeed fixed now.
> In theory such fixes should go into separate patches, but in this case I
> guess it would be too much to ask.
> Maybe mention this in the commit message instead, so that this fix doesn't
> stay hidden like that?
> 
> 

I'm convinced now. I have added a preparatory bugfix patch before this
patch.

> 
> Reviewed-by: Maxim Levitsky 
> 
> Best regards,
>   Maxim Levitsky
> 

Re: [PATCH v6 09/42] nvme: add max_ioqpairs device parameter

2020-03-25 Thread Maxim Levitsky
On Mon, 2020-03-16 at 07:28 -0700, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> The num_queues device parameter has a slightly confusing meaning because
> it accounts for the admin queue pair which is not really optional.
> Secondly, it is really a maximum value of queues allowed.
> 
> Add a new max_ioqpairs parameter that only accounts for I/O queue pairs,
> but keep num_queues for compatibility.
> 
> Signed-off-by: Klaus Jensen 
> ---
>  hw/block/nvme.c | 45 ++---
>  hw/block/nvme.h |  4 +++-
>  2 files changed, 29 insertions(+), 20 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 7cf7cf55143e..7dfd8a1a392d 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -19,7 +19,7 @@
>   *  -drive file=,if=none,id=
>   *  -device nvme,drive=,serial=,id=, \
>   *  cmb_size_mb=, \
> - *  num_queues=
> + *  max_ioqpairs=
>   *
>   * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
>   * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
> @@ -27,6 +27,7 @@
>  
>  #include "qemu/osdep.h"
>  #include "qemu/units.h"
> +#include "qemu/error-report.h"
>  #include "hw/block/block.h"
>  #include "hw/pci/msix.h"
>  #include "hw/pci/pci.h"
> @@ -72,12 +73,12 @@ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void 
> *buf, int size)
>  
>  static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
>  {
> -return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
> +return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
>  }
>  
>  static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
>  {
> -return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
> +return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
>  }
>  
>  static void nvme_inc_cq_tail(NvmeCQueue *cq)
> @@ -639,7 +640,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
>  trace_nvme_dev_err_invalid_create_cq_addr(prp1);
>  return NVME_INVALID_FIELD | NVME_DNR;
>  }
> -if (unlikely(vector > n->params.num_queues)) {
> +if (unlikely(vector > n->params.max_ioqpairs + 1)) {
>  trace_nvme_dev_err_invalid_create_cq_vector(vector);
>  return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
>  }
> @@ -803,8 +804,8 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd 
> *cmd, NvmeRequest *req)
>  trace_nvme_dev_getfeat_vwcache(result ? "enabled" : "disabled");
>  break;
>  case NVME_NUMBER_OF_QUEUES:
> -result = cpu_to_le32((n->params.num_queues - 2) |
> - ((n->params.num_queues - 2) << 16));
> +result = cpu_to_le32((n->params.max_ioqpairs - 1) |
> + ((n->params.max_ioqpairs - 1) << 16));
>  trace_nvme_dev_getfeat_numq(result);
>  break;
>  case NVME_TIMESTAMP:
> @@ -848,10 +849,10 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd 
> *cmd, NvmeRequest *req)
>  case NVME_NUMBER_OF_QUEUES:
>  trace_nvme_dev_setfeat_numq((dw11 & 0xffff) + 1,
>  ((dw11 >> 16) & 0xffff) + 1,
> -n->params.num_queues - 1,
> -n->params.num_queues - 1);
> -req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
> -  ((n->params.num_queues - 2) << 16));
> +n->params.max_ioqpairs,
> +n->params.max_ioqpairs);
> +req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
> +  ((n->params.max_ioqpairs - 1) << 16));
>  break;
>  case NVME_TIMESTAMP:
>  return nvme_set_feature_timestamp(n, cmd);
> @@ -924,12 +925,12 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
>  
>  blk_drain(n->conf.blk);
>  
> -for (i = 0; i < n->params.num_queues; i++) {
> +for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
>  if (n->sq[i] != NULL) {
>  nvme_free_sq(n->sq[i], n);
>  }
>  }
> -for (i = 0; i < n->params.num_queues; i++) {
> +for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
>  if (n->cq[i] != NULL) {
>  nvme_free_cq(n->cq[i], n);
>  }
> @@ -1332,9 +1333,15 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
> **errp)
>  int64_t bs_size;
>  uint8_t *pci_conf;
>  
> -if (!n->params.num_queues) {
> -error_setg(errp, "num_queues can't be zero");
> -return;
> +if (n->params.num_queues) {
> +warn_report("nvme: num_queues is deprecated; please use max_ioqpairs 
> "
> +"instead");
> +
> +n->params.max_ioqpairs = n->params.num_queues - 1;
> +}
> +
> +if (!n->params.max_ioqpairs) {
> +error_setg(errp, "max_ioqpairs can't be less than 1");
>  }
This is not even a nitpick, just an idea.

[PATCH v6 09/42] nvme: add max_ioqpairs device parameter

2020-03-16 Thread Klaus Jensen
From: Klaus Jensen 

The num_queues device parameter has a slightly confusing meaning because
it accounts for the admin queue pair which is not really optional.
Secondly, it is really a maximum value of queues allowed.

Add a new max_ioqpairs parameter that only accounts for I/O queue pairs,
but keep num_queues for compatibility.

Signed-off-by: Klaus Jensen 
---
 hw/block/nvme.c | 45 ++---
 hw/block/nvme.h |  4 +++-
 2 files changed, 29 insertions(+), 20 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 7cf7cf55143e..7dfd8a1a392d 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -19,7 +19,7 @@
  *  -drive file=,if=none,id=
  *  -device nvme,drive=,serial=,id=, \
  *  cmb_size_mb=, \
- *  num_queues=
+ *  max_ioqpairs=
  *
  * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
  * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
@@ -27,6 +27,7 @@
 
 #include "qemu/osdep.h"
 #include "qemu/units.h"
+#include "qemu/error-report.h"
 #include "hw/block/block.h"
 #include "hw/pci/msix.h"
 #include "hw/pci/pci.h"
@@ -72,12 +73,12 @@ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void 
*buf, int size)
 
 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
 {
-return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
+return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
 }
 
 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
 {
-return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
+return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
 }
 
 static void nvme_inc_cq_tail(NvmeCQueue *cq)
@@ -639,7 +640,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
 trace_nvme_dev_err_invalid_create_cq_addr(prp1);
 return NVME_INVALID_FIELD | NVME_DNR;
 }
-if (unlikely(vector > n->params.num_queues)) {
+if (unlikely(vector > n->params.max_ioqpairs + 1)) {
 trace_nvme_dev_err_invalid_create_cq_vector(vector);
 return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
 }
@@ -803,8 +804,8 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, 
NvmeRequest *req)
 trace_nvme_dev_getfeat_vwcache(result ? "enabled" : "disabled");
 break;
 case NVME_NUMBER_OF_QUEUES:
-result = cpu_to_le32((n->params.num_queues - 2) |
- ((n->params.num_queues - 2) << 16));
+result = cpu_to_le32((n->params.max_ioqpairs - 1) |
+ ((n->params.max_ioqpairs - 1) << 16));
 trace_nvme_dev_getfeat_numq(result);
 break;
 case NVME_TIMESTAMP:
@@ -848,10 +849,10 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd 
*cmd, NvmeRequest *req)
 case NVME_NUMBER_OF_QUEUES:
 trace_nvme_dev_setfeat_numq((dw11 & 0xffff) + 1,
 ((dw11 >> 16) & 0xffff) + 1,
-n->params.num_queues - 1,
-n->params.num_queues - 1);
-req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
-  ((n->params.num_queues - 2) << 16));
+n->params.max_ioqpairs,
+n->params.max_ioqpairs);
+req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
+  ((n->params.max_ioqpairs - 1) << 16));
 break;
 case NVME_TIMESTAMP:
 return nvme_set_feature_timestamp(n, cmd);
@@ -924,12 +925,12 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
 
 blk_drain(n->conf.blk);
 
-for (i = 0; i < n->params.num_queues; i++) {
+for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
 if (n->sq[i] != NULL) {
 nvme_free_sq(n->sq[i], n);
 }
 }
-for (i = 0; i < n->params.num_queues; i++) {
+for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
 if (n->cq[i] != NULL) {
 nvme_free_cq(n->cq[i], n);
 }
@@ -1332,9 +1333,15 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
**errp)
 int64_t bs_size;
 uint8_t *pci_conf;
 
-if (!n->params.num_queues) {
-error_setg(errp, "num_queues can't be zero");
-return;
+if (n->params.num_queues) {
+warn_report("nvme: num_queues is deprecated; please use max_ioqpairs "
+"instead");
+
+n->params.max_ioqpairs = n->params.num_queues - 1;
+}
+
+if (!n->params.max_ioqpairs) {
+error_setg(errp, "max_ioqpairs can't be less than 1");
 }
 
 if (!n->conf.blk) {
@@ -1365,19 +1372,19 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
**errp)
 pcie_endpoint_cap_init(pci_dev, 0x80);
 
 n->num_namespaces = 1;
-n->reg_size = pow2ceil(0x1004 + 2 * (n->params.num_queues + 1) * 4);
+n->reg_size = pow2ceil(0x1008 + 2 * (n->params.max_ioqpairs) * 4);