Re: [PATCH V2 6/7] hw/block/nvme: support namespace attachment command

2021-02-26 Thread Minwoo Im
On 21-02-27 02:59:35, Keith Busch wrote:
> On Thu, Feb 11, 2021 at 01:09:36AM +0900, Minwoo Im wrote:
> > @@ -183,6 +183,7 @@ static const uint32_t nvme_cse_acs[256] = {
> >      [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
> >      [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
> >      [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
> > +    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP,
> 
> Missing NVME_CMD_EFF_NIC for the attachment command.

Will do that!
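
Presumably something like this; just a sketch of the requested change,
with NVME_CMD_EFF_NIC being the existing "Namespace Inventory Changed"
effects bit:

    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,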

> >  };
> >  
> >  static const uint32_t nvme_cse_iocs_none[256];
> > @@ -3766,6 +3767,62 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
> >      return NVME_NO_COMPLETE;
> >  }
> >  
> > +static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns);
> > +static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    NvmeNamespace *ns;
> > +    NvmeCtrl *ctrl;
> > +    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
> > +    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
> > +    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
> > +    bool attach = !(dw10 & 0xf);
> > +    uint16_t *nr_ids = &list[0];
> > +    uint16_t *ids = &list[1];
> > +    uint16_t ret;
> > +    int i;
> > +
> > +    trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);
> > +
> > +    ns = nvme_subsys_ns(n->subsys, nsid);
> > +    if (!ns) {
> > +        return NVME_INVALID_FIELD | NVME_DNR;
> > +    }
> > +
> > +    ret = nvme_dma(n, (uint8_t *)list, 4096,
> > +                   DMA_DIRECTION_TO_DEVICE, req);
> > +    if (ret) {
> > +        return ret;
> > +    }
> > +
> > +    if (!*nr_ids) {
> > +        return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
> > +    }
> > +
> > +    for (i = 0; i < *nr_ids; i++) {
> > +        ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
> > +        if (!ctrl) {
> > +            return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
> > +        }
> > +
> > +        if (attach) {
> > +            if (nvme_ns_is_attached(ctrl, ns)) {
> > +                return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
> > +            }
> > +
> > +            nvme_ns_attach(ctrl, ns);
> > +            __nvme_select_ns_iocs(ctrl, ns);
> > +        } else {
> > +            if (!nvme_ns_is_attached(ctrl, ns)) {
> > +                return NVME_NS_NOT_ATTACHED | NVME_DNR;
> > +            }
> > +
> > +            nvme_ns_detach(ctrl, ns);
> > +        }
> > +    }
> > +
> > +    return NVME_SUCCESS;
> > +}
> 
> Every controller that has newly attached the namespace needs to emit the
> Namespace Notify AER in order for the host to react correctly to the
> command.

Okay, will prepare the next series.
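
Roughly like the following, I think. This is only a sketch: it assumes
the existing nvme_enqueue_event() helper plus changed-namespace-list
constants along the lines of NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED and
NVME_LOG_CHANGED_NSLIST, and it has to run for every controller that
gained (or lost) the namespace:

    nvme_ns_attach(ctrl, ns);
    __nvme_select_ns_iocs(ctrl, ns);

    /* notify the host that this controller's namespace inventory changed */
    nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE,
                       NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED,
                       NVME_LOG_CHANGED_NSLIST);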

Thanks!



Re: [PATCH V2 6/7] hw/block/nvme: support namespace attachment command

2021-02-26 Thread Keith Busch
On Thu, Feb 11, 2021 at 01:09:36AM +0900, Minwoo Im wrote:
> @@ -183,6 +183,7 @@ static const uint32_t nvme_cse_acs[256] = {
>      [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
>      [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
>      [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
> +    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP,

Missing NVME_CMD_EFF_NIC for the attachment command.

>  };
>  
>  static const uint32_t nvme_cse_iocs_none[256];
> @@ -3766,6 +3767,62 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
>      return NVME_NO_COMPLETE;
>  }
>  
> +static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns);
> +static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
> +{
> +    NvmeNamespace *ns;
> +    NvmeCtrl *ctrl;
> +    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
> +    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
> +    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
> +    bool attach = !(dw10 & 0xf);
> +    uint16_t *nr_ids = &list[0];
> +    uint16_t *ids = &list[1];
> +    uint16_t ret;
> +    int i;
> +
> +    trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);
> +
> +    ns = nvme_subsys_ns(n->subsys, nsid);
> +    if (!ns) {
> +        return NVME_INVALID_FIELD | NVME_DNR;
> +    }
> +
> +    ret = nvme_dma(n, (uint8_t *)list, 4096,
> +                   DMA_DIRECTION_TO_DEVICE, req);
> +    if (ret) {
> +        return ret;
> +    }
> +
> +    if (!*nr_ids) {
> +        return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
> +    }
> +
> +    for (i = 0; i < *nr_ids; i++) {
> +        ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
> +        if (!ctrl) {
> +            return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
> +        }
> +
> +        if (attach) {
> +            if (nvme_ns_is_attached(ctrl, ns)) {
> +                return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
> +            }
> +
> +            nvme_ns_attach(ctrl, ns);
> +            __nvme_select_ns_iocs(ctrl, ns);
> +        } else {
> +            if (!nvme_ns_is_attached(ctrl, ns)) {
> +                return NVME_NS_NOT_ATTACHED | NVME_DNR;
> +            }
> +
> +            nvme_ns_detach(ctrl, ns);
> +        }
> +    }
> +
> +    return NVME_SUCCESS;
> +}

Every controller that has newly attached the namespace needs to emit the
Namespace Notify AER in order for the host to react correctly to the
command.



Re: [PATCH V2 6/7] hw/block/nvme: support namespace attachment command

2021-02-22 Thread Klaus Jensen
On Feb 11 01:09, Minwoo Im wrote:
> This patch adds support for the Namespace Attachment admin command for
> the pre-defined nvme-ns device nodes.  Attaching and detaching
> namespaces is only supported when 'subsys' is given, because once a
> namespace is detached from a controller, the NVMe subsystem still has
> to manage the detached, but allocated, namespace.
> 
> Signed-off-by: Minwoo Im 
> ---
>  hw/block/nvme-subsys.h | 10 +++++
>  hw/block/nvme.c        | 59 ++++++++++++++++++++++++++++
>  hw/block/nvme.h        |  5 +++
>  hw/block/trace-events  |  2 +
>  include/block/nvme.h   |  5 +++
>  5 files changed, 81 insertions(+)
> 
> diff --git a/hw/block/nvme-subsys.h b/hw/block/nvme-subsys.h
> index 14627f9ccb41..ef4bec928eae 100644
> --- a/hw/block/nvme-subsys.h
> +++ b/hw/block/nvme-subsys.h
> @@ -30,6 +30,16 @@ typedef struct NvmeSubsystem {
>  int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
>  int nvme_subsys_register_ns(NvmeNamespace *ns, Error **errp);
>  
> +static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
> +                                         uint32_t cntlid)
> +{
> +    if (!subsys) {
> +        return NULL;
> +    }
> +
> +    return subsys->ctrls[cntlid];
> +}
> +
>  /*
>   * Return allocated namespace of the specified nsid in the subsystem.
>   */
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 697368a6ae0c..71bcd66f1956 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -183,6 +183,7 @@ static const uint32_t nvme_cse_acs[256] = {
>      [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
>      [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
>      [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
> +    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP,
>  };
>  
>  static const uint32_t nvme_cse_iocs_none[256];
> @@ -3766,6 +3767,62 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
>      return NVME_NO_COMPLETE;
>  }
>  
> +static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns);
> +static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
> +{
> +    NvmeNamespace *ns;
> +    NvmeCtrl *ctrl;
> +    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
> +    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
> +    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
> +    bool attach = !(dw10 & 0xf);
> +    uint16_t *nr_ids = &list[0];
> +    uint16_t *ids = &list[1];
> +    uint16_t ret;
> +    int i;
> +
> +    trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);
> +
> +    ns = nvme_subsys_ns(n->subsys, nsid);
> +    if (!ns) {
> +        return NVME_INVALID_FIELD | NVME_DNR;
> +    }
> +
> +    ret = nvme_dma(n, (uint8_t *)list, 4096,
> +                   DMA_DIRECTION_TO_DEVICE, req);
> +    if (ret) {
> +        return ret;
> +    }
> +
> +    if (!*nr_ids) {
> +        return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
> +    }
> +
> +    for (i = 0; i < *nr_ids; i++) {
> +        ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
> +        if (!ctrl) {
> +            return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
> +        }
> +
> +        if (attach) {
> +            if (nvme_ns_is_attached(ctrl, ns)) {
> +                return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
> +            }
> +
> +            nvme_ns_attach(ctrl, ns);
> +            __nvme_select_ns_iocs(ctrl, ns);
> +        } else {
> +            if (!nvme_ns_is_attached(ctrl, ns)) {
> +                return NVME_NS_NOT_ATTACHED | NVME_DNR;
> +            }
> +
> +            nvme_ns_detach(ctrl, ns);
> +        }
> +    }
> +
> +    return NVME_SUCCESS;
> +}
> +
>  static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
>  {
>      trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
> @@ -3797,6 +3854,8 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
>          return nvme_get_feature(n, req);
>      case NVME_ADM_CMD_ASYNC_EV_REQ:
>          return nvme_aer(n, req);
> +    case NVME_ADM_CMD_NS_ATTACHMENT:
> +        return nvme_ns_attachment(n, req);
>      default:
>          assert(false);
>      }
> diff --git a/hw/block/nvme.h b/hw/block/nvme.h
> index 1c7796b20996..5a1ab857d166 100644
> --- a/hw/block/nvme.h
> +++ b/hw/block/nvme.h
> @@ -222,6 +222,11 @@ static inline void nvme_ns_attach(NvmeCtrl *n, NvmeNamespace *ns)
>      n->namespaces[nvme_nsid(ns) - 1] = ns;
>  }
>  
> +static inline void nvme_ns_detach(NvmeCtrl *n, NvmeNamespace *ns)
> +{
> +    n->namespaces[nvme_nsid(ns) - 1] = NULL;
> +}
> +
>  static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
>  {
>      NvmeSQueue *sq = req->sq;
> diff --git a/hw/block/trace-events b/hw/block/trace-events
> index b6e972d733a6..bf67fe7873d2 100644
> --- a/hw/block/trace-events
> +++ b/hw/block/trace-events
> @@ -80,6 +80,8 @@ pci_nvme_aer(uint16_t cid) "cid %"PRIu16""
>  pci_nvme_aer_aerl_exceeded(void) "aerl exceeded"
>  pci_nvme_aer_masked(uint8_t type, uint8_t mask) "type 0x%"PRIx8" mask 0x%"PRIx8""
>  pci_nvme_aer_post_cqe(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""

[PATCH V2 6/7] hw/block/nvme: support namespace attachment command

2021-02-10 Thread Minwoo Im
This patch adds support for the Namespace Attachment admin command for
the pre-defined nvme-ns device nodes.  Attaching and detaching
namespaces is only supported when 'subsys' is given, because once a
namespace is detached from a controller, the NVMe subsystem still has
to manage the detached, but allocated, namespace.

Signed-off-by: Minwoo Im 
---
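A note for reviewers: the Controller List that nvme_ns_attachment()
parses below follows the standard NVMe layout, i.e. a 4KiB buffer of
little-endian 16-bit entries where entry 0 holds the number of
controller identifiers and entries 1..N hold the CNTLIDs. A host-side
sketch with hypothetical CNTLID values (cpu_to_le16() shown only for
illustration):

    uint16_t list[2048] = {
        cpu_to_le16(2),    /* number of identifiers that follow */
        cpu_to_le16(0x0),  /* CNTLID of the first target controller */
        cpu_to_le16(0x1),  /* CNTLID of the second target controller */
    };
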
 hw/block/nvme-subsys.h | 10 +++++
 hw/block/nvme.c        | 59 ++++++++++++++++++++++++++++
 hw/block/nvme.h        |  5 +++
 hw/block/trace-events  |  2 +
 include/block/nvme.h   |  5 +++
 5 files changed, 81 insertions(+)

diff --git a/hw/block/nvme-subsys.h b/hw/block/nvme-subsys.h
index 14627f9ccb41..ef4bec928eae 100644
--- a/hw/block/nvme-subsys.h
+++ b/hw/block/nvme-subsys.h
@@ -30,6 +30,16 @@ typedef struct NvmeSubsystem {
 int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
 int nvme_subsys_register_ns(NvmeNamespace *ns, Error **errp);
 
+static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
+                                         uint32_t cntlid)
+{
+    if (!subsys) {
+        return NULL;
+    }
+
+    return subsys->ctrls[cntlid];
+}
+
 /*
  * Return allocated namespace of the specified nsid in the subsystem.
  */
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 697368a6ae0c..71bcd66f1956 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -183,6 +183,7 @@ static const uint32_t nvme_cse_acs[256] = {
     [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
     [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
     [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP,
 };
 
 static const uint32_t nvme_cse_iocs_none[256];
@@ -3766,6 +3767,62 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
     return NVME_NO_COMPLETE;
 }
 
+static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns);
+static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
+{
+    NvmeNamespace *ns;
+    NvmeCtrl *ctrl;
+    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
+    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
+    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+    bool attach = !(dw10 & 0xf);
+    uint16_t *nr_ids = &list[0];
+    uint16_t *ids = &list[1];
+    uint16_t ret;
+    int i;
+
+    trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);
+
+    ns = nvme_subsys_ns(n->subsys, nsid);
+    if (!ns) {
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    ret = nvme_dma(n, (uint8_t *)list, 4096,
+                   DMA_DIRECTION_TO_DEVICE, req);
+    if (ret) {
+        return ret;
+    }
+
+    if (!*nr_ids) {
+        return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
+    }
+
+    for (i = 0; i < *nr_ids; i++) {
+        ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
+        if (!ctrl) {
+            return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
+        }
+
+        if (attach) {
+            if (nvme_ns_is_attached(ctrl, ns)) {
+                return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
+            }
+
+            nvme_ns_attach(ctrl, ns);
+            __nvme_select_ns_iocs(ctrl, ns);
+        } else {
+            if (!nvme_ns_is_attached(ctrl, ns)) {
+                return NVME_NS_NOT_ATTACHED | NVME_DNR;
+            }
+
+            nvme_ns_detach(ctrl, ns);
+        }
+    }
+
+    return NVME_SUCCESS;
+}
+
 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
 {
     trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
@@ -3797,6 +3854,8 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
         return nvme_get_feature(n, req);
     case NVME_ADM_CMD_ASYNC_EV_REQ:
         return nvme_aer(n, req);
+    case NVME_ADM_CMD_NS_ATTACHMENT:
+        return nvme_ns_attachment(n, req);
     default:
         assert(false);
     }
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 1c7796b20996..5a1ab857d166 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -222,6 +222,11 @@ static inline void nvme_ns_attach(NvmeCtrl *n, NvmeNamespace *ns)
     n->namespaces[nvme_nsid(ns) - 1] = ns;
 }
 
+static inline void nvme_ns_detach(NvmeCtrl *n, NvmeNamespace *ns)
+{
+    n->namespaces[nvme_nsid(ns) - 1] = NULL;
+}
+
 static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
 {
     NvmeSQueue *sq = req->sq;
diff --git a/hw/block/trace-events b/hw/block/trace-events
index b6e972d733a6..bf67fe7873d2 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -80,6 +80,8 @@ pci_nvme_aer(uint16_t cid) "cid %"PRIu16""
 pci_nvme_aer_aerl_exceeded(void) "aerl exceeded"
 pci_nvme_aer_masked(uint8_t type, uint8_t mask) "type 0x%"PRIx8" mask 0x%"PRIx8""
 pci_nvme_aer_post_cqe(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""
+pci_nvme_ns_attachment(uint16_t cid, uint8_t sel) "cid %"PRIu16", sel=0x%"PRIx8""
+pci_nvme_ns_attachment_attach(uint16_t cntlid, uint32_t nsid) "cntlid=0x%"PRIx16", nsid=0x%"PRIx32""
 pci_nvme_enqueue_event(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""