Re: [PATCH v5 03/26] nvme: move device parameters to separate struct

2020-02-12 Thread Maxim Levitsky
On Tue, 2020-02-04 at 10:51 +0100, Klaus Jensen wrote:
> Move device configuration parameters to separate struct to make it
> explicit what is configurable and what is set internally.
> 
> Signed-off-by: Klaus Jensen 
> ---
>  hw/block/nvme.c | 44 ++--
>  hw/block/nvme.h | 16 +---
>  2 files changed, 35 insertions(+), 25 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index c9ad6aaa5f95..f05ebcce3f53 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -64,12 +64,12 @@ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void 
> *buf, int size)
>  
>  static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
>  {
> -return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
> +return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
>  }
>  
>  static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
>  {
> -return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
> +return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
>  }
>  
>  static void nvme_inc_cq_tail(NvmeCQueue *cq)
> @@ -631,7 +631,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
>  trace_nvme_dev_err_invalid_create_cq_addr(prp1);
>  return NVME_INVALID_FIELD | NVME_DNR;
>  }
> -if (unlikely(vector > n->num_queues)) {
> +if (unlikely(vector > n->params.num_queues)) {
>  trace_nvme_dev_err_invalid_create_cq_vector(vector);
>  return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
>  }
> @@ -783,7 +783,8 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd 
> *cmd, NvmeRequest *req)
>  trace_nvme_dev_getfeat_vwcache(result ? "enabled" : "disabled");
>  break;
>  case NVME_NUMBER_OF_QUEUES:
> -result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 
> 16));
> +result = cpu_to_le32((n->params.num_queues - 2) |
> +((n->params.num_queues - 2) << 16));
Line wrapping issue.

>  trace_nvme_dev_getfeat_numq(result);
>  break;
>  case NVME_TIMESTAMP:
> @@ -826,9 +827,10 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd 
> *cmd, NvmeRequest *req)
>  break;
>  case NVME_NUMBER_OF_QUEUES:
>  trace_nvme_dev_setfeat_numq((dw11 & 0xFFFF) + 1,
> -((dw11 >> 16) & 0xFFFF) + 1, n->num_queues - 1, n->num_queues - 
> 1);
> -req->cqe.result =
> -cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
> +((dw11 >> 16) & 0xFFFF) + 1, n->params.num_queues - 1,
> +n->params.num_queues - 1);
> +req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
> +((n->params.num_queues - 2) << 16));
Here as well, and there are more probably.
>  break;
>  case NVME_TIMESTAMP:
>  return nvme_set_feature_timestamp(n, cmd);
> @@ -899,12 +901,12 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
>  
>  blk_drain(n->conf.blk);
>  
> -for (i = 0; i < n->num_queues; i++) {
> +for (i = 0; i < n->params.num_queues; i++) {
>  if (n->sq[i] != NULL) {
>  nvme_free_sq(n->sq[i], n);
>  }
>  }
> -for (i = 0; i < n->num_queues; i++) {
> +for (i = 0; i < n->params.num_queues; i++) {
>  if (n->cq[i] != NULL) {
>  nvme_free_cq(n->cq[i], n);
>  }
> @@ -1307,7 +1309,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
> **errp)
>  int64_t bs_size;
>  uint8_t *pci_conf;
>  
> -if (!n->num_queues) {
> +if (!n->params.num_queues) {
>  error_setg(errp, "num_queues can't be zero");
>  return;
>  }
> @@ -1323,7 +1325,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
> **errp)
>  return;
>  }
>  
> -if (!n->serial) {
> +if (!n->params.serial) {
>  error_setg(errp, "serial property not set");
>  return;
>  }
> @@ -1340,25 +1342,25 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
> **errp)
>  pcie_endpoint_cap_init(pci_dev, 0x80);
>  
>  n->num_namespaces = 1;
> -n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
> +n->reg_size = pow2ceil(0x1004 + 2 * (n->params.num_queues + 1) * 4);
>  n->ns_size = bs_size / (uint64_t)n->num_namespaces;
>  
>  n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
> -n->sq = g_new0(NvmeSQueue *, n->num_queues);
> -n->cq = g_new0(NvmeCQueue *, n->num_queues);
> +n->sq = g_new0(NvmeSQueue *, n->params.num_queues);
> +n->cq = g_new0(NvmeCQueue *, n->params.num_queues);
>  
> +memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
> +  "nvme", n->reg_size);
>  pci_register_bar(pci_dev, 0,
>  PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
>  &n->iomem);
> -msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);
> +msix_init_exclusive_bar(pci_dev, n->params.num_queues, 4, NULL);
>  
>  id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));

[PATCH v5 03/26] nvme: move device parameters to separate struct

2020-02-04 Thread Klaus Jensen
Move device configuration parameters to separate struct to make it
explicit what is configurable and what is set internally.

Signed-off-by: Klaus Jensen 
---
 hw/block/nvme.c | 44 ++--
 hw/block/nvme.h | 16 +---
 2 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index c9ad6aaa5f95..f05ebcce3f53 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -64,12 +64,12 @@ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void 
*buf, int size)
 
 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
 {
-return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
+return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
 }
 
 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
 {
-return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
+return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
 }
 
 static void nvme_inc_cq_tail(NvmeCQueue *cq)
@@ -631,7 +631,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
 trace_nvme_dev_err_invalid_create_cq_addr(prp1);
 return NVME_INVALID_FIELD | NVME_DNR;
 }
-if (unlikely(vector > n->num_queues)) {
+if (unlikely(vector > n->params.num_queues)) {
 trace_nvme_dev_err_invalid_create_cq_vector(vector);
 return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
 }
@@ -783,7 +783,8 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, 
NvmeRequest *req)
 trace_nvme_dev_getfeat_vwcache(result ? "enabled" : "disabled");
 break;
 case NVME_NUMBER_OF_QUEUES:
-result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 
16));
+result = cpu_to_le32((n->params.num_queues - 2) |
+((n->params.num_queues - 2) << 16));
 trace_nvme_dev_getfeat_numq(result);
 break;
 case NVME_TIMESTAMP:
@@ -826,9 +827,10 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd 
*cmd, NvmeRequest *req)
 break;
 case NVME_NUMBER_OF_QUEUES:
 trace_nvme_dev_setfeat_numq((dw11 & 0xFFFF) + 1,
-((dw11 >> 16) & 0xFFFF) + 1, n->num_queues - 1, n->num_queues - 1);
-req->cqe.result =
-cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
+((dw11 >> 16) & 0xFFFF) + 1, n->params.num_queues - 1,
+n->params.num_queues - 1);
+req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
+((n->params.num_queues - 2) << 16));
 break;
 case NVME_TIMESTAMP:
 return nvme_set_feature_timestamp(n, cmd);
@@ -899,12 +901,12 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
 
 blk_drain(n->conf.blk);
 
-for (i = 0; i < n->num_queues; i++) {
+for (i = 0; i < n->params.num_queues; i++) {
 if (n->sq[i] != NULL) {
 nvme_free_sq(n->sq[i], n);
 }
 }
-for (i = 0; i < n->num_queues; i++) {
+for (i = 0; i < n->params.num_queues; i++) {
 if (n->cq[i] != NULL) {
 nvme_free_cq(n->cq[i], n);
 }
@@ -1307,7 +1309,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
 int64_t bs_size;
 uint8_t *pci_conf;
 
-if (!n->num_queues) {
+if (!n->params.num_queues) {
 error_setg(errp, "num_queues can't be zero");
 return;
 }
@@ -1323,7 +1325,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
 return;
 }
 
-if (!n->serial) {
+if (!n->params.serial) {
 error_setg(errp, "serial property not set");
 return;
 }
@@ -1340,25 +1342,25 @@ static void nvme_realize(PCIDevice *pci_dev, Error 
**errp)
 pcie_endpoint_cap_init(pci_dev, 0x80);
 
 n->num_namespaces = 1;
-n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
+n->reg_size = pow2ceil(0x1004 + 2 * (n->params.num_queues + 1) * 4);
 n->ns_size = bs_size / (uint64_t)n->num_namespaces;
 
 n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
-n->sq = g_new0(NvmeSQueue *, n->num_queues);
-n->cq = g_new0(NvmeCQueue *, n->num_queues);
+n->sq = g_new0(NvmeSQueue *, n->params.num_queues);
+n->cq = g_new0(NvmeCQueue *, n->params.num_queues);
 
+memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
+                      "nvme", n->reg_size);
 pci_register_bar(pci_dev, 0,
 PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
 &n->iomem);
-msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);
+msix_init_exclusive_bar(pci_dev, n->params.num_queues, 4, NULL);
 
 id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
 id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
 strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
 strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
-strpadcpy((char *)id->sn, sizeof(id->sn), n->serial, ' ');
+strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, '