Re: [PATCH 01/17] hw/block/nvme: bump spec data structures to v1.3

2020-07-02 Thread Klaus Jensen

Re: [PATCH 01/17] hw/block/nvme: bump spec data structures to v1.3

2020-07-02 Thread Dmitry Fomichev

[PATCH 01/17] hw/block/nvme: bump spec data structures to v1.3

2020-06-29 Thread Klaus Jensen
From: Klaus Jensen 

Add missing fields in the Identify Controller and Identify Namespace
data structures to bring them in line with NVMe v1.3.

This also adds data structures and defines for SGL support which
requires a couple of trivial changes to the nvme block driver as well.

Signed-off-by: Klaus Jensen 
Acked-by: Fam Zheng 
Reviewed-by: Maxim Levitsky 
---
 block/nvme.c |  18 ++---
 hw/block/nvme.c  |  12 ++--
 include/block/nvme.h | 154 ++-
 3 files changed, 152 insertions(+), 32 deletions(-)
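
For orientation before the hunks: the per-command PRP fields move under a common
data pointer, dptr, which can alternatively hold an SGL descriptor. Below is a
minimal sketch of that shape; the NvmeSglDescriptor layout mirrors the definition
added in include/block/nvme.h later in the patch, while the NvmeCmdDptr union
itself is an assumption inferred from the cmd->dptr.prp1/prp2 accesses in the
hunks, not the literal hunk text.

    #include <stdint.h>

    /* Sketch only: the command-level data pointer becomes a union that holds
     * either two PRP entries or one SGL descriptor, which is why call sites
     * change from cmd->prp1/prp2 to cmd->dptr.prp1/prp2. */
    typedef struct NvmeSglDescriptor {
        uint64_t addr;      /* data block / segment address */
        uint32_t len;       /* length in bytes */
        uint8_t  rsvd[3];
        uint8_t  type;      /* descriptor type (bits 7:4) and subtype (bits 3:0) */
    } NvmeSglDescriptor;

    typedef union NvmeCmdDptr {
        struct {
            uint64_t prp1;  /* PRP entry 1 */
            uint64_t prp2;  /* PRP entry 2, or pointer to a PRP list */
        };
        NvmeSglDescriptor sgl;  /* SGL entry 1 */
    } NvmeCmdDptr;

With such a union in place, the block driver and the device model only need the
mechanical prp1/prp2 to dptr.prp1/dptr.prp2 renames shown in the hunks below.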

diff --git a/block/nvme.c b/block/nvme.c
index eb2f54dd9dc9..29e90557c428 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -446,7 +446,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
 error_setg(errp, "Cannot map buffer for DMA");
 goto out;
 }
-cmd.prp1 = cpu_to_le64(iova);
+cmd.dptr.prp1 = cpu_to_le64(iova);
 
 if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
 error_setg(errp, "Failed to identify controller");
@@ -545,7 +545,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
 }
 cmd = (NvmeCmd) {
 .opcode = NVME_ADM_CMD_CREATE_CQ,
-.prp1 = cpu_to_le64(q->cq.iova),
+.dptr.prp1 = cpu_to_le64(q->cq.iova),
 .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
 .cdw11 = cpu_to_le32(0x3),
 };
@@ -556,7 +556,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
 }
 cmd = (NvmeCmd) {
 .opcode = NVME_ADM_CMD_CREATE_SQ,
-.prp1 = cpu_to_le64(q->sq.iova),
+.dptr.prp1 = cpu_to_le64(q->sq.iova),
 .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
 .cdw11 = cpu_to_le32(0x1 | (n << 16)),
 };
@@ -904,16 +904,16 @@ try_map:
 case 0:
 abort();
 case 1:
-cmd->prp1 = pagelist[0];
-cmd->prp2 = 0;
+cmd->dptr.prp1 = pagelist[0];
+cmd->dptr.prp2 = 0;
 break;
 case 2:
-cmd->prp1 = pagelist[0];
-cmd->prp2 = pagelist[1];
+cmd->dptr.prp1 = pagelist[0];
+cmd->dptr.prp2 = pagelist[1];
 break;
 default:
-cmd->prp1 = pagelist[0];
-cmd->prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
+cmd->dptr.prp1 = pagelist[0];
+cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
 break;
 }
 trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 1aee042d4cb2..71b388aa0e20 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -397,8 +397,8 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
 NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
 uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
 uint64_t slba = le64_to_cpu(rw->slba);
-uint64_t prp1 = le64_to_cpu(rw->prp1);
-uint64_t prp2 = le64_to_cpu(rw->prp2);
+uint64_t prp1 = le64_to_cpu(rw->dptr.prp1);
+uint64_t prp2 = le64_to_cpu(rw->dptr.prp2);
 
 uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
 uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
@@ -795,8 +795,8 @@ static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
 
 static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
 {
-uint64_t prp1 = le64_to_cpu(cmd->prp1);
-uint64_t prp2 = le64_to_cpu(cmd->prp2);
+uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
 
 uint64_t timestamp = nvme_get_timestamp(n);
 
@@ -834,8 +834,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
 {
 uint16_t ret;
 uint64_t timestamp;
-uint64_t prp1 = le64_to_cpu(cmd->prp1);
-uint64_t prp2 = le64_to_cpu(cmd->prp2);
+uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
 
 ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
 sizeof(timestamp), prp1, prp2);
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 1720ee1d5158..6d1fa6ff2228 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -377,15 +377,53 @@ enum NvmePmrmscMask {
 #define NVME_PMRMSC_SET_CBA(pmrmsc, val)   \
 (pmrmsc |= (uint64_t)(val & PMRMSC_CBA_MASK) << PMRMSC_CBA_SHIFT)
 
+enum NvmeSglDescriptorType {
+NVME_SGL_DESCR_TYPE_DATA_BLOCK  = 0x0,
+NVME_SGL_DESCR_TYPE_BIT_BUCKET  = 0x1,
+NVME_SGL_DESCR_TYPE_SEGMENT = 0x2,
+NVME_SGL_DESCR_TYPE_LAST_SEGMENT= 0x3,
+NVME_SGL_DESCR_TYPE_KEYED_DATA_BLOCK= 0x4,
+
+NVME_SGL_DESCR_TYPE_VENDOR_SPECIFIC = 0xf,
+};
+
+enum NvmeSglDescriptorSubtype {
+NVME_SGL_DESCR_SUBTYPE_ADDRESS = 0x0,
+};
+
+typedef struct NvmeSglDescriptor {
+uint64_t addr;
+uint32_t len;
+uint8_t  rsvd[3];
+uint8_t  type;
+} NvmeSglDescriptor;
+
+#define NVME_SGL_TYPE(type) ((type >> 4) & 0xf)
+#define NVME_SGL_SUBTYPE(type)  (type & 0xf)
+
+typedef
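
As a usage illustration of the SGL definitions introduced above, the following
self-contained sketch builds a Data Block descriptor and decodes it with the
NVME_SGL_TYPE/NVME_SGL_SUBTYPE macros. The helper nvme_sgl_data_block() and the
main() harness are made up for this example; real controller code would also
store addr and len in little-endian (cpu_to_le64/cpu_to_le32), as the hunks
above do for the PRP fields.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Local copies mirroring the definitions added by the patch. */
    typedef struct NvmeSglDescriptor {
        uint64_t addr;
        uint32_t len;
        uint8_t  rsvd[3];
        uint8_t  type;
    } NvmeSglDescriptor;

    #define NVME_SGL_TYPE(type)    ((type >> 4) & 0xf)
    #define NVME_SGL_SUBTYPE(type) (type & 0xf)

    enum {
        NVME_SGL_DESCR_TYPE_DATA_BLOCK = 0x0,
        NVME_SGL_DESCR_SUBTYPE_ADDRESS = 0x0,
    };

    /* Hypothetical helper: fill a Data Block descriptor (address subtype). */
    static NvmeSglDescriptor nvme_sgl_data_block(uint64_t addr, uint32_t len)
    {
        NvmeSglDescriptor sgl;

        memset(&sgl, 0, sizeof(sgl));
        sgl.addr = addr;
        sgl.len  = len;
        sgl.type = (NVME_SGL_DESCR_TYPE_DATA_BLOCK << 4) |
                   NVME_SGL_DESCR_SUBTYPE_ADDRESS;

        return sgl;
    }

    int main(void)
    {
        NvmeSglDescriptor sgl = nvme_sgl_data_block(0x1000, 4096);

        printf("type=%d subtype=%d addr=0x%" PRIx64 " len=%" PRIu32 "\n",
               NVME_SGL_TYPE(sgl.type), NVME_SGL_SUBTYPE(sgl.type),
               sgl.addr, sgl.len);
        return 0;
    }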