Re: [PATCH v2 12/20] nvme: bump supported specification version to 1.3

2019-11-26 Thread Klaus Birkelund
On Mon, Nov 25, 2019 at 12:13:15PM +0000, Beata Michalska wrote:
> On Mon, 18 Nov 2019 at 09:48, Klaus Birkelund  wrote:
> >
> > On Tue, Nov 12, 2019 at 03:05:06PM +0000, Beata Michalska wrote:
> > > Hi Klaus,
> > >
> > > On Tue, 15 Oct 2019 at 11:52, Klaus Jensen  wrote:
> > > >
> > > > +static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *c)
> > > > +{
> > > > +    static const int len = 4096;
> > > > +
> > > > +    struct ns_descr {
> > > > +        uint8_t nidt;
> > > > +        uint8_t nidl;
> > > > +        uint8_t rsvd2[2];
> > > > +        uint8_t nid[16];
> > > > +    };
> > > > +
> > > > +    uint32_t nsid = le32_to_cpu(c->nsid);
> > > > +    uint64_t prp1 = le64_to_cpu(c->prp1);
> > > > +    uint64_t prp2 = le64_to_cpu(c->prp2);
> > > > +
> > > > +    struct ns_descr *list;
> > > > +    uint16_t ret;
> > > > +
> > > > +    trace_nvme_identify_ns_descr_list(nsid);
> > > > +
> > > > +    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
> > > > +        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
> > > > +        return NVME_INVALID_NSID | NVME_DNR;
> > > > +    }
> > > > +
> > > In theory this should abort the command for inactive NSIDs as well.
> > > But I guess this will come later on.
> > >
> >
> > At this point in the series, the device does not support multiple
> > namespaces anyway and num_namespaces is always 1. But this has also been
> > reported separately in relation to the patch adding multiple namespaces
> > and is fixed in v3.
> >
> > > > +    list = g_malloc0(len);
> > > > +    list->nidt = 0x3;
> > > > +    list->nidl = 0x10;
> > > > +    *(uint32_t *) &list->nid[12] = cpu_to_be32(nsid);
> > > > +
> > > Might be worth adding a comment here -> as per the NGUID/EUI64 format.
> > > Also, those are currently not specified in the namespace identity data
> > > structure.
> > >
> >
> > I'll add a comment explaining why the Namespace UUID is set to this
> > value here.
> > The NGUID/EUI64 fields are not set in the namespace identity data
> > structure as they are not required. See the descriptions of NGUID and
> > EUI64. Here for NGUID:
> >
> > "The controller shall specify a globally unique namespace identifier
> > in this field, the EUI64 field, or a Namespace UUID in the Namespace
> > Identification Descriptor..."
> >
> > Here, I chose to provide it in the Namespace Identification Descriptor
> > (by setting `list->nidt = 0x3`).
> >
> > > > +    ret = nvme_dma_read_prp(n, (uint8_t *) list, len, prp1, prp2);
> > > > +    g_free(list);
> > > > +    return ret;
> > > > +}
> > > > +
> > > >  static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> > > >  {
> > > >      NvmeIdentify *c = (NvmeIdentify *)cmd;
> > > > @@ -934,7 +978,9 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> > > >      case 0x01:
> > > >          return nvme_identify_ctrl(n, c);
> > > >      case 0x02:
> > > > -        return nvme_identify_nslist(n, c);
> > > > +        return nvme_identify_ns_list(n, c);
> > > > +    case 0x03:
> > > > +        return nvme_identify_ns_descr_list(n, cmd);
> > > >      default:
> > > >          trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
> > > >          return NVME_INVALID_FIELD | NVME_DNR;
> > > > @@ -1101,6 +1147,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> > > >          blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
> > > >          break;
> > > >      case NVME_NUMBER_OF_QUEUES:
> > > > +        if (n->qs_created > 2) {
> > > > +            return NVME_CMD_SEQ_ERROR | NVME_DNR;
> > > > +        }
> > > > +
> > > I am not sure this is entirely correct, as the spec says:
> > > "if any I/O Submission and/or Completion Queues (...)"
> > > so a single created queue might be enough for this command to be
> > > rejected.
> > > Also, I think the condition here is meant to ensure that the number of
> > > queues requested is set once, at init phase. Currently this will allow
> > > the setting to happen whenever there is no active queue -> so at any
> > > point in time (provided the condition mentioned). I might be wrong
> > > here, but it seems that what we need is a single status flag saying
> > > whether any queue has been created prior to the Set Features command
> > > at all.
> > >
> >
> > Internally, the admin queue pair is counted in qs_created, which is the
> > reason for checking whether it is above 2. The admin queues are created
> > when the controller is enabled (mmio write to the EN register in CC).
> >
> > I'll add a comment about that - I see why it is unclear.
> >
> 
> Ok, so indeed I have missed the fact that the admin queues are being
> tracked by 'qs_created'. Still, I might be wrong, but it is enough to
> create an I/O submission queue and then delete it, and the code will
> allow the command to proceed, whereas the spec says:
> "If a Set Features command is issued for this feature after creation of
> any I/O Submission and/or I/O Completion Queues, then the Set Features
> command shall fail with status code of Command Sequence Error"

Re: [PATCH v2 12/20] nvme: bump supported specification version to 1.3

2019-11-25 Thread Beata Michalska
On Mon, 18 Nov 2019 at 09:48, Klaus Birkelund  wrote:
>
> On Tue, Nov 12, 2019 at 03:05:06PM +0000, Beata Michalska wrote:
> > Hi Klaus,
> >
> > On Tue, 15 Oct 2019 at 11:52, Klaus Jensen  wrote:
> > >
> > > +static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *c)
> > > +{
> > > +    static const int len = 4096;
> > > +
> > > +    struct ns_descr {
> > > +        uint8_t nidt;
> > > +        uint8_t nidl;
> > > +        uint8_t rsvd2[2];
> > > +        uint8_t nid[16];
> > > +    };
> > > +
> > > +    uint32_t nsid = le32_to_cpu(c->nsid);
> > > +    uint64_t prp1 = le64_to_cpu(c->prp1);
> > > +    uint64_t prp2 = le64_to_cpu(c->prp2);
> > > +
> > > +    struct ns_descr *list;
> > > +    uint16_t ret;
> > > +
> > > +    trace_nvme_identify_ns_descr_list(nsid);
> > > +
> > > +    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
> > > +        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
> > > +        return NVME_INVALID_NSID | NVME_DNR;
> > > +    }
> > > +
> > In theory this should abort the command for inactive NSIDs as well.
> > But I guess this will come later on.
> >
>
> At this point in the series, the device does not support multiple
> namespaces anyway and num_namespaces is always 1. But this has also been
> reported separately in relation to the patch adding multiple namespaces
> and is fixed in v3.
>
> > > +    list = g_malloc0(len);
> > > +    list->nidt = 0x3;
> > > +    list->nidl = 0x10;
> > > +    *(uint32_t *) &list->nid[12] = cpu_to_be32(nsid);
> > > +
> > Might be worth adding a comment here -> as per the NGUID/EUI64 format.
> > Also, those are currently not specified in the namespace identity data
> > structure.
> >
>
> I'll add a comment explaining why the Namespace UUID is set to this
> value here.
> The NGUID/EUI64 fields are not set in the namespace identity data
> structure as they are not required. See the descriptions of NGUID and
> EUI64. Here for NGUID:
>
> "The controller shall specify a globally unique namespace identifier
> in this field, the EUI64 field, or a Namespace UUID in the Namespace
> Identification Descriptor..."
>
> Here, I chose to provide it in the Namespace Identification Descriptor
> (by setting `list->nidt = 0x3`).
>
> > > +    ret = nvme_dma_read_prp(n, (uint8_t *) list, len, prp1, prp2);
> > > +    g_free(list);
> > > +    return ret;
> > > +}
> > > +
> > >  static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> > >  {
> > >      NvmeIdentify *c = (NvmeIdentify *)cmd;
> > > @@ -934,7 +978,9 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> > >      case 0x01:
> > >          return nvme_identify_ctrl(n, c);
> > >      case 0x02:
> > > -        return nvme_identify_nslist(n, c);
> > > +        return nvme_identify_ns_list(n, c);
> > > +    case 0x03:
> > > +        return nvme_identify_ns_descr_list(n, cmd);
> > >      default:
> > >          trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
> > >          return NVME_INVALID_FIELD | NVME_DNR;
> > > @@ -1101,6 +1147,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> > >          blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
> > >          break;
> > >      case NVME_NUMBER_OF_QUEUES:
> > > +        if (n->qs_created > 2) {
> > > +            return NVME_CMD_SEQ_ERROR | NVME_DNR;
> > > +        }
> > > +
> > I am not sure this is entirely correct, as the spec says:
> > "if any I/O Submission and/or Completion Queues (...)"
> > so a single created queue might be enough for this command to be
> > rejected.
> > Also, I think the condition here is meant to ensure that the number of
> > queues requested is set once, at init phase. Currently this will allow
> > the setting to happen whenever there is no active queue -> so at any
> > point in time (provided the condition mentioned). I might be wrong
> > here, but it seems that what we need is a single status flag saying
> > whether any queue has been created prior to the Set Features command
> > at all.
> >
>
> Internally, the admin queue pair is counted in qs_created, which is the
> reason for checking whether it is above 2. The admin queues are created
> when the controller is enabled (mmio write to the EN register in CC).
>
> I'll add a comment about that - I see why it is unclear.
>

Ok, so indeed I have missed the fact that the admin queues are being
tracked by 'qs_created'. Still, I might be wrong, but it is enough to
create an I/O submission queue and then delete it, and the code will
allow the command to proceed, whereas the spec says:
"If a Set Features command is issued for this feature after creation of
any I/O Submission and/or I/O Completion Queues, then the Set Features
command shall fail with status code of Command Sequence Error"

I might be misreading it, though; it is not entirely clear to me whether
this means: at least one queue created ... or created and in use.

But I guess that is really minor.
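
To make the scenario concrete, a hypothetical command sequence (the
qs_created values assume the patch as posted, with the admin queue pair
counted when the controller is enabled):

    Enable controller (CC.EN)           -> qs_created = 2
    Create I/O Completion Queue         -> qs_created = 3
    Create I/O Submission Queue         -> qs_created = 4
    Delete I/O Submission Queue         -> qs_created = 3
    Delete I/O Completion Queue         -> qs_created = 2
    Set Features, Number of Queues      -> allowed by the check, though
                                           arguably the spec requires
                                           Command Sequence Error here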

BR
Beata


> >
> > Small note: this patch seems to be introducing more changes
> > than specified in the commit message and especially in the subject.
> > Might be worth extending it a bit.

Re: [PATCH v2 12/20] nvme: bump supported specification version to 1.3

2019-11-18 Thread Klaus Birkelund
On Tue, Nov 12, 2019 at 03:05:06PM +0000, Beata Michalska wrote:
> Hi Klaus,
> 
> On Tue, 15 Oct 2019 at 11:52, Klaus Jensen  wrote:
> >
> > +static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *c)
> > +{
> > +    static const int len = 4096;
> > +
> > +    struct ns_descr {
> > +        uint8_t nidt;
> > +        uint8_t nidl;
> > +        uint8_t rsvd2[2];
> > +        uint8_t nid[16];
> > +    };
> > +
> > +    uint32_t nsid = le32_to_cpu(c->nsid);
> > +    uint64_t prp1 = le64_to_cpu(c->prp1);
> > +    uint64_t prp2 = le64_to_cpu(c->prp2);
> > +
> > +    struct ns_descr *list;
> > +    uint16_t ret;
> > +
> > +    trace_nvme_identify_ns_descr_list(nsid);
> > +
> > +    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
> > +        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
> > +        return NVME_INVALID_NSID | NVME_DNR;
> > +    }
> > +
> In theory this should abort the command for inactive NSIDs as well.
> But I guess this will come later on.
> 

At this point in the series, the device does not support multiple
namespaces anyway and num_namespaces is always 1. But this has also been
reported separately in relation to the patch adding multiple namespaces
and is fixed in v3.
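
For illustration only, a sketch of what the additional check might look
like once multiple namespaces exist (nvme_ns_is_active() is a made-up
helper for the sake of the example, not something this series adds):

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    /* hypothetical: additionally reject inactive namespaces */
    if (!nvme_ns_is_active(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;
    }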

> > +    list = g_malloc0(len);
> > +    list->nidt = 0x3;
> > +    list->nidl = 0x10;
> > +    *(uint32_t *) &list->nid[12] = cpu_to_be32(nsid);
> > +
> Might be worth adding a comment here -> as per the NGUID/EUI64 format.
> Also, those are currently not specified in the namespace identity data
> structure.
> 

I'll add a comment explaining why the Namespace UUID is set to this value
here.
The NGUID/EUI64 fields are not set in the namespace identity data
structure as they are not required. See the descriptions of NGUID and
EUI64. Here for NGUID:

"The controller shall specify a globally unique namespace identifier
in this field, the EUI64 field, or a Namespace UUID in the Namespace
Identification Descriptor..."

Here, I chose to provide it in the Namespace Identification Descriptor
(by setting `list->nidt = 0x3`).
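
For illustration, a minimal sketch of how a host might walk the 4096-byte
buffer returned for CNS 03h, assuming the NVMe 1.3 descriptor layout (the
parse function and the buffer are hypothetical, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    struct ns_descr_hdr {
        uint8_t nidt;      /* identifier type; 0x3 means Namespace UUID */
        uint8_t nidl;      /* identifier length; 0x10 for a UUID */
        uint8_t rsvd2[2];
        uint8_t nid[];     /* nidl bytes of identifier */
    };

    static void parse_ns_descr_list(const uint8_t *buf, size_t len)
    {
        size_t off = 0;

        while (off + 4 <= len) {
            const struct ns_descr_hdr *d =
                (const struct ns_descr_hdr *)(buf + off);

            if (d->nidt == 0) {
                break; /* the zero-filled tail of the buffer ends the list */
            }
            if (d->nidt == 0x3 && d->nidl == 0x10) {
                /* with this device, bytes 12..15 of the 16-byte UUID
                 * carry the big-endian NSID */
            }
            off += 4 + d->nidl;
        }
    }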

> > +    ret = nvme_dma_read_prp(n, (uint8_t *) list, len, prp1, prp2);
> > +    g_free(list);
> > +    return ret;
> > +}
> > +
> >  static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> >  {
> >      NvmeIdentify *c = (NvmeIdentify *)cmd;
> > @@ -934,7 +978,9 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> >      case 0x01:
> >          return nvme_identify_ctrl(n, c);
> >      case 0x02:
> > -        return nvme_identify_nslist(n, c);
> > +        return nvme_identify_ns_list(n, c);
> > +    case 0x03:
> > +        return nvme_identify_ns_descr_list(n, cmd);
> >      default:
> >          trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
> >          return NVME_INVALID_FIELD | NVME_DNR;
> > @@ -1101,6 +1147,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> >          blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
> >          break;
> >      case NVME_NUMBER_OF_QUEUES:
> > +        if (n->qs_created > 2) {
> > +            return NVME_CMD_SEQ_ERROR | NVME_DNR;
> > +        }
> > +
> I am not sure this is entirely correct, as the spec says:
> "if any I/O Submission and/or Completion Queues (...)"
> so a single created queue might be enough for this command to be
> rejected.
> Also, I think the condition here is meant to ensure that the number of
> queues requested is set once, at init phase. Currently this will allow
> the setting to happen whenever there is no active queue -> so at any
> point in time (provided the condition mentioned). I might be wrong
> here, but it seems that what we need is a single status flag saying
> whether any queue has been created prior to the Set Features command
> at all.
> 

Internally, the admin queue pair is counted in qs_created, which is the
reason for checking whether it is above 2. The admin queues are created
when the controller is enabled (mmio write to the EN register in CC).

I'll add a comment about that - I see why it is unclear.
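
Something along these lines (a sketch only; the comment wording is
illustrative, and io_qs_ever_created is a made-up field, not part of this
patch):

    case NVME_NUMBER_OF_QUEUES:
        /*
         * qs_created counts the admin submission and completion queues
         * created when the controller is enabled (CC.EN), so a value
         * above 2 means that at least one I/O queue currently exists.
         */
        if (n->qs_created > 2) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

A sticky flag would instead also catch the create-then-delete sequence
raised elsewhere in the thread:

        if (n->io_qs_ever_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }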

> 
> Small note: this patch seems to be introducing more changes
> than specified in the commit message and especially in the subject.
> Might be worth extending it a bit.
> 

You are right. I'll split it up.



Re: [PATCH v2 12/20] nvme: bump supported specification version to 1.3

2019-11-12 Thread Beata Michalska
Hi Klaus,

On Tue, 15 Oct 2019 at 11:52, Klaus Jensen  wrote:
>
> Add the new Namespace Identification Descriptor List (CNS 03h) and track
> creation of queues to enable the controller to return Command Sequence
> Error if Set Features is called for Number of Queues after any queues
> have been created.
>
> Signed-off-by: Klaus Jensen 
> ---
>  hw/block/nvme.c   | 82 +++
>  hw/block/nvme.h   |  1 +
>  hw/block/trace-events |  8 +++--
>  include/block/nvme.h  | 30 +---
>  4 files changed, 100 insertions(+), 21 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index e7d46dcc6afe..1e2320b38b14 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -9,20 +9,22 @@
>   */
>
>  /**
> - * Reference Specification: NVM Express 1.2.1
> + * Reference Specification: NVM Express 1.3d
>   *
>   *   https://nvmexpress.org/resources/specifications/
>   */
>
>  /**
>   * Usage: add options:
> - *      -drive file=<file>,if=none,id=<drive_id>
> - *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id>, \
> - *              cmb_size_mb=<cmb_size_mb>, \
> - *              num_queues=<N>
> + *     -drive file=<file>,if=none,id=<drive_id>
> + *     -device nvme,drive=<drive_id>,serial=<serial>,id=<id>
>   *
> - * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
> - * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
> + * Advanced optional options:
> + *
> + *   num_queues=<uint32>      : Maximum number of IO Queues.
> + *                              Default: 64
> + *   cmb_size_mb=<uint32>     : Size of Controller Memory Buffer in MBs.
> + *                              Default: 0 (disabled)
>   */
>
>  #include "qemu/osdep.h"
> @@ -345,6 +347,8 @@ static void nvme_post_cqes(void *opaque)
>  static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
>  {
>      assert(cq->cqid == req->sq->cqid);
> +
> +    trace_nvme_enqueue_req_completion(req->cid, cq->cqid, req->status);
>      QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
>      QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
>      timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
> @@ -530,6 +534,7 @@ static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
>      if (sq->sqid) {
>          g_free(sq);
>      }
> +    n->qs_created--;
>  }
> 
>  static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
> @@ -596,6 +601,7 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
>      cq = n->cq[cqid];
>      QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
>      n->sq[sqid] = sq;
> +    n->qs_created++;
>  }
> 
>  static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
> @@ -742,7 +748,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>      uint32_t dw11 = le32_to_cpu(cmd->cdw11);
>      uint32_t dw12 = le32_to_cpu(cmd->cdw12);
>      uint32_t dw13 = le32_to_cpu(cmd->cdw13);
> -    uint16_t lid = dw10 & 0xff;
> +    uint8_t  lid = dw10 & 0xff;
> +    uint8_t  lsp = (dw10 >> 8) & 0xf;
>      uint8_t  rae = (dw10 >> 15) & 0x1;
>      uint32_t numdl, numdu;
>      uint64_t off, lpol, lpou;
> @@ -760,7 +767,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>          return NVME_INVALID_FIELD | NVME_DNR;
>      }
> 
> -    trace_nvme_get_log(req->cid, lid, rae, len, off);
> +    trace_nvme_get_log(req->cid, lid, lsp, rae, len, off);
> 
>      switch (lid) {
>      case NVME_LOG_ERROR_INFO:
> @@ -784,6 +791,7 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
>      if (cq->cqid) {
>          g_free(cq);
>      }
> +    n->qs_created--;
>  }
> 
>  static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
> @@ -824,6 +832,7 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
>      msix_vector_use(&n->parent_obj, cq->vector);
>      n->cq[cqid] = cq;
>      cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
> +    n->qs_created++;
>  }
> 
>  static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
> @@ -897,7 +906,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
>          prp1, prp2);
>  }
> 
> -static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
> +static uint16_t nvme_identify_ns_list(NvmeCtrl *n, NvmeIdentify *c)
>  {
>      static const int data_len = 4 * KiB;
>      uint32_t min_nsid = le32_to_cpu(c->nsid);
> @@ -907,7 +916,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
>      uint16_t ret;
>      int i, j = 0;
> 
> -    trace_nvme_identify_nslist(min_nsid);
> +    trace_nvme_identify_ns_list(min_nsid);
> 
>      list = g_malloc0(data_len);
>      for (i = 0; i < n->num_namespaces; i++) {
> @@ -924,6 +933,41 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
>      return ret;
>  }
> 
> +static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *c)
> +{
> +    static const int len = 4096;
> +
> +    struct ns_descr {
> +        uint8_t nidt;
> +        uint8_t nidl;
> +        uint8_t rsvd2[2];
> +        uint8_t nid[16];
> +    };

[PATCH v2 12/20] nvme: bump supported specification version to 1.3

2019-10-15 Thread Klaus Jensen
Add the new Namespace Identification Descriptor List (CNS 03h) and track
creation of queues to enable the controller to return Command Sequence
Error if Set Features is called for Number of Queues after any queues
have been created.

Signed-off-by: Klaus Jensen 
---
 hw/block/nvme.c   | 82 +++
 hw/block/nvme.h   |  1 +
 hw/block/trace-events |  8 +++--
 include/block/nvme.h  | 30 +---
 4 files changed, 100 insertions(+), 21 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index e7d46dcc6afe..1e2320b38b14 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -9,20 +9,22 @@
  */
 
 /**
- * Reference Specification: NVM Express 1.2.1
+ * Reference Specification: NVM Express 1.3d
  *
  *   https://nvmexpress.org/resources/specifications/
  */
 
 /**
  * Usage: add options:
- *      -drive file=<file>,if=none,id=<drive_id>
- *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id>, \
- *              cmb_size_mb=<cmb_size_mb>, \
- *              num_queues=<N>
+ *     -drive file=<file>,if=none,id=<drive_id>
+ *     -device nvme,drive=<drive_id>,serial=<serial>,id=<id>
  *
- * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
- * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
+ * Advanced optional options:
+ *
+ *   num_queues=<uint32>      : Maximum number of IO Queues.
+ *                              Default: 64
+ *   cmb_size_mb=<uint32>     : Size of Controller Memory Buffer in MBs.
+ *                              Default: 0 (disabled)
  */
 
 #include "qemu/osdep.h"
@@ -345,6 +347,8 @@ static void nvme_post_cqes(void *opaque)
 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
 {
     assert(cq->cqid == req->sq->cqid);
+
+    trace_nvme_enqueue_req_completion(req->cid, cq->cqid, req->status);
     QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
     QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
     timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
@@ -530,6 +534,7 @@ static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
     if (sq->sqid) {
         g_free(sq);
     }
+    n->qs_created--;
 }
 
 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -596,6 +601,7 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
     cq = n->cq[cqid];
     QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
     n->sq[sqid] = sq;
+    n->qs_created++;
 }
 
 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -742,7 +748,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
     uint32_t dw12 = le32_to_cpu(cmd->cdw12);
     uint32_t dw13 = le32_to_cpu(cmd->cdw13);
-    uint16_t lid = dw10 & 0xff;
+    uint8_t  lid = dw10 & 0xff;
+    uint8_t  lsp = (dw10 >> 8) & 0xf;
     uint8_t  rae = (dw10 >> 15) & 0x1;
     uint32_t numdl, numdu;
     uint64_t off, lpol, lpou;
@@ -760,7 +767,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
-    trace_nvme_get_log(req->cid, lid, rae, len, off);
+    trace_nvme_get_log(req->cid, lid, lsp, rae, len, off);
 
     switch (lid) {
     case NVME_LOG_ERROR_INFO:
@@ -784,6 +791,7 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
     if (cq->cqid) {
         g_free(cq);
     }
+    n->qs_created--;
 }
 
 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -824,6 +832,7 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
     msix_vector_use(&n->parent_obj, cq->vector);
     n->cq[cqid] = cq;
     cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
+    n->qs_created++;
 }
 
 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -897,7 +906,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
         prp1, prp2);
 }
 
-static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
+static uint16_t nvme_identify_ns_list(NvmeCtrl *n, NvmeIdentify *c)
 {
     static const int data_len = 4 * KiB;
     uint32_t min_nsid = le32_to_cpu(c->nsid);
@@ -907,7 +916,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
     uint16_t ret;
     int i, j = 0;
 
-    trace_nvme_identify_nslist(min_nsid);
+    trace_nvme_identify_ns_list(min_nsid);
 
     list = g_malloc0(data_len);
     for (i = 0; i < n->num_namespaces; i++) {
@@ -924,6 +933,41 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
     return ret;
 }
 
+static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeCmd *c)
+{
+    static const int len = 4096;
+
+    struct ns_descr {
+        uint8_t nidt;
+        uint8_t nidl;
+        uint8_t rsvd2[2];
+        uint8_t nid[16];
+    };
+
+    uint32_t nsid = le32_to_cpu(c->nsid);
+    uint64_t prp1 = le64_to_cpu(c->prp1);
+    uint64_t prp2 = le64_to_cpu(c->prp2);
+
+    struct ns_descr *list;
+    uint16_t ret;
+
+    trace_nvme_identify_ns_descr_list(nsid);
+
+    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+