[RFC PATCH v4] hw/nvme:Adding Support for namespace management

2022-03-23 Thread Naveen
From: Naveen Nagar 

This patch adds support for the namespace management create and delete
operations. It has been tested with the command line below; the image
files used for unallocated namespaces are created with a size of 0 GB.
nvme_namespace_create looks through the list of unallocated namespaces,
initializes one, and returns its NSID. A new mandatory subsystem
parameter, tnvmcap, has been added, and the total capacity of the
namespaces created is not allowed to exceed tnvmcap.

-device nvme-subsys,id=subsys0,tnvmcap=8
-device nvme,serial=foo,id=nvme0,subsys=subsys0
-device nvme,serial=bar,id=nvme1,subsys=subsys0

-drive id=ns1,file=ns1.img,if=none
-device nvme-ns,drive=ns1,bus=nvme0,nsid=1,zoned=false,shared=true
-drive id=ns2,file=ns2.img,if=none
-device nvme-ns,drive=ns2,bus=nvme0,nsid=2,zoned=false,shared=true
-drive id=ns3,file=ns3.img,if=none
-device nvme-ns,drive=ns3,bus=nvme0,nsid=3,zoned=false,shared=true
-drive id=ns4,file=ns4.img,if=none
-device nvme-ns,drive=ns4,bus=nvme0,nsid=4,zoned=false,shared=true
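
Once a guest is booted, the new admin commands can be exercised with
nvme-cli, for example (values are illustrative; nsze/ncap are in logical
blocks, so 2097152 blocks of 512 bytes is 1 GiB, and the controller IDs
for --controllers can be checked with "nvme list-ctrl /dev/nvme0"):

nvme create-ns /dev/nvme0 --nsze=2097152 --ncap=2097152 --flbas=0
nvme attach-ns /dev/nvme0 --namespace-id=1 --controllers=0
nvme ns-rescan /dev/nvme0
nvme detach-ns /dev/nvme0 --namespace-id=1 --controllers=0
nvme delete-ns /dev/nvme0 --namespace-id=1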

Please review and suggest if any changes are required.

Signed-off-by: Naveen Nagar 

Since v3:
- Lukasz Maniak found a bug related to unvmcap support; the proposed
  solution has been added
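
For reference, a minimal sketch of the capacity bookkeeping implied by
tnvmcap/unvmcap (illustrative only; the struct and function names below
are assumptions made for the example, not the names used in this patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative bookkeeping: tnvmcap is the subsystem total capacity,
 * unvmcap the part not yet consumed by created namespaces. */
struct subsys_cap {
    uint64_t tnvmcap;   /* total NVM capacity, in bytes */
    uint64_t unvmcap;   /* unallocated NVM capacity, in bytes */
};

/* Namespace create reserves capacity; refuse it if the request would
 * exceed what is still unallocated (and therefore tnvmcap). */
static bool ns_create_reserve(struct subsys_cap *cap, uint64_t ns_bytes)
{
    if (ns_bytes > cap->unvmcap) {
        return false;
    }
    cap->unvmcap -= ns_bytes;
    return true;
}

/* Namespace delete returns the capacity to the unallocated pool. */
static void ns_delete_release(struct subsys_cap *cap, uint64_t ns_bytes)
{
    cap->unvmcap += ns_bytes;
}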

---
 hw/nvme/ctrl.c   | 257 ---
 hw/nvme/ns.c |  80 +-
 hw/nvme/nvme.h   |   7 +-
 hw/nvme/subsys.c |   3 +
 include/block/nvme.h |  18 ++-
 5 files changed, 317 insertions(+), 48 deletions(-)

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 03760ddeae..5c5b915dc6 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -222,6 +222,7 @@ static const uint32_t nvme_cse_acs[256] = {
 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
+[NVME_ADM_CMD_NS_MANAGEMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_NS_ATTACHMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_FORMAT_NVM]   = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
 };
@@ -4715,11 +4716,19 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
 NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
 uint32_t nsid = le32_to_cpu(c->nsid);
+NvmeIdNs *id_ns = NULL;
+uint16_t ret;
 
 trace_pci_nvme_identify_ns(nsid);
 
-if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
+if (!nvme_nsid_valid(n, nsid)) {
 return NVME_INVALID_NSID | NVME_DNR;
+} else if (nsid == NVME_NSID_BROADCAST) {
+id_ns = g_new0(NvmeIdNs, 1);
+nvme_ns_identify_common(id_ns);
+ret = nvme_c2h(n, (uint8_t *)id_ns, sizeof(NvmeIdNs), req);
+g_free(id_ns);
+return ret;
 }
 
 ns = nvme_ns(n, nsid);
@@ -5475,9 +5484,208 @@ static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
 }
 }
 
+static int nvme_blk_truncate(BlockBackend *blk, size_t len, Error **errp)
+{
+int ret;
+uint64_t perm, shared_perm;
+
+blk_get_perm(blk, &perm, &shared_perm);
+
+ret = blk_set_perm(blk, perm | BLK_PERM_RESIZE, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_truncate(blk, len, false, PREALLOC_MODE_OFF, 0, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_set_perm(blk, perm, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+return 0;
+}
+
+static uint32_t nvme_allocate_nsid(NvmeCtrl *n)
+{
+uint32_t nsid = 0;
+for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+if (nvme_ns(n, i) || nvme_subsys_ns(n->subsys, i)) {
+continue;
+}
+
+nsid = i;
+return nsid;
+}
+return nsid;
+}
+
+static uint16_t nvme_namespace_create(NvmeCtrl *n, NvmeRequest *req)
+{
+uint32_t ret;
+NvmeIdNs id_ns_host;
+NvmeSubsystem *subsys = n->subsys;
+Error *err = NULL;
+uint8_t flbas_host;
+uint64_t ns_size;
+int lba_index;
+NvmeNamespace *ns;
+NvmeCtrl *ctrl;
+NvmeIdNs *id_ns;
+
+ret = nvme_h2c(n, (uint8_t *)&id_ns_host, sizeof(id_ns_host), req);
+if (ret) {
+return ret;
+}
+
+if (id_ns_host.ncap < id_ns_host.nsze) {
+return NVME_THIN_PROVISION_NO_SUPP | NVME_DNR;
+} else if (id_ns_host.ncap > id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (!id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (QSLIST_EMPTY(&subsys->unallocated_namespaces)) {
+return NVME_NS_ID_UNAVAILABLE;
+}
+
+ns = QSLIST_FIRST(&subsys->unallocated_namespaces);
+id_ns = &ns->id_ns;
+flbas_host = (id_ns_host.flbas) & (0xF);
+
+if (flbas_host > id_ns->nlbaf) {
+return NVME_INVALID_FORMAT | NVME_DNR;
+}
+
+ret = nvme_ns_setup(ns, &err);
+if (ret) {
+return ret;
+}
+
+id_ns->flbas = id_ns_host.flbas;
+id_ns->dps = id_ns_host.dps;

[RFC PATCH v3] hw/nvme:Adding Support for namespace management

2021-11-10 Thread Naveen
From: Naveen Nagar 

This patch adds support for the namespace management create and delete
operations. It has been tested with the command line below; the image
files used for unallocated namespaces are created with a size of 0 GB.
ns_create looks through the list of unallocated namespaces, initializes
one, and returns its NSID. A new mandatory field, tnvmcap, has been
added, and the total capacity of the namespaces created is not allowed
to exceed tnvmcap.

-device nvme-subsys,id=subsys0,tnvmcap=8
-device nvme,serial=foo,id=nvme0,subsys=subsys0
-device nvme,serial=bar,id=nvme1,subsys=subsys0

-drive id=ns1,file=ns1.img,if=none
-device nvme-ns,drive=ns1,bus=nvme0,nsid=1,zoned=false,shared=true
-drive id=ns2,file=ns2.img,if=none
-device nvme-ns,drive=ns2,bus=nvme0,nsid=2,zoned=false,shared=true
-drive id=ns3,file=ns3.img,if=none
-device nvme-ns,drive=ns3,bus=nvme0,nsid=3,zoned=false,shared=true
-drive id=ns4,file=ns4.img,if=none
-device nvme-ns,drive=ns4,bus=nvme0,nsid=4,zoned=false,shared=true

Please review and suggest if any changes are required.

Signed-off-by: Naveen Nagar 

Since v2:
- Lukasz Maniak found a bug in namespace attachment; the proposed
  solution has been added

---
 hw/nvme/ctrl.c   | 241 ---
 hw/nvme/ns.c |  61 ++-
 hw/nvme/nvme.h   |   7 +-
 hw/nvme/subsys.c |   3 +
 include/block/nvme.h |  18 +++-
 5 files changed, 288 insertions(+), 42 deletions(-)

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 6baf9e0420..63ea2fcb14 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -219,6 +219,7 @@ static const uint32_t nvme_cse_acs[256] = {
 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
+[NVME_ADM_CMD_NS_MANAGEMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_NS_ATTACHMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_FORMAT_NVM]   = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
 };
@@ -4450,11 +4451,19 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
 NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
 uint32_t nsid = le32_to_cpu(c->nsid);
+NvmeIdNs *id_ns = NULL;
+uint16_t ret;
 
 trace_pci_nvme_identify_ns(nsid);
 
-if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
+if (!nvme_nsid_valid(n, nsid)) {
 return NVME_INVALID_NSID | NVME_DNR;
+} else if (nsid == NVME_NSID_BROADCAST) {
+id_ns = g_new0(NvmeIdNs, 1);
+nvme_ns_identify_common(id_ns);
+ret = nvme_c2h(n, (uint8_t *)id_ns, sizeof(NvmeIdNs), req);
+g_free(id_ns);
+return ret;
 }
 
 ns = nvme_ns(n, nsid);
@@ -5184,9 +5193,204 @@ static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
 }
 }
 
+static int nvme_blk_truncate(BlockBackend *blk, size_t len, Error **errp)
+{
+int ret;
+uint64_t perm, shared_perm;
+
+blk_get_perm(blk, &perm, &shared_perm);
+
+ret = blk_set_perm(blk, perm | BLK_PERM_RESIZE, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_truncate(blk, len, false, PREALLOC_MODE_OFF, 0, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_set_perm(blk, perm, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+return 0;
+}
+
+static uint32_t nvme_allocate_nsid(NvmeCtrl *n)
+{
+uint32_t nsid = 0;
+for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+if (nvme_ns(n, i) || nvme_subsys_ns(n->subsys, i)) {
+continue;
+}
+
+nsid = i;
+return nsid;
+}
+return nsid;
+}
+
+static uint16_t nvme_namespace_create(NvmeCtrl *n, NvmeRequest *req)
+{
+uint32_t ret;
+NvmeIdNs id_ns_host;
+NvmeSubsystem *subsys = n->subsys;
+Error *err = NULL;
+uint8_t flbas_host;
+uint64_t ns_size;
+int lba_index;
+NvmeNamespace *ns;
+NvmeCtrl *ctrl;
+NvmeIdNs *id_ns;
+
+ret = nvme_h2c(n, (uint8_t *)&id_ns_host, sizeof(id_ns_host), req);
+if (ret) {
+return ret;
+}
+
+if (id_ns_host.ncap < id_ns_host.nsze) {
+return NVME_THIN_PROVISION_NO_SUPP | NVME_DNR;
+} else if (id_ns_host.ncap > id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (!id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (QSLIST_EMPTY(&subsys->unallocated_namespaces)) {
+return NVME_NS_ID_UNAVAILABLE;
+}
+
+ns = QSLIST_FIRST(&subsys->unallocated_namespaces);
+id_ns = &ns->id_ns;
+flbas_host = (id_ns_host.flbas) & (0xF);
+
+if (flbas_host > id_ns->nlbaf) {
+return NVME_INVALID_FORMAT | NVME_DNR;
+}
+
+ret = nvme_ns_setup(ns, &err);
+if (ret) {
+return ret;
+}
+
+id_ns->flbas = id_ns_host.flbas;
+id_ns->dps = id_ns_host.dps;

hw/nvme: fix verification of select field in namespace attachment

2021-08-23 Thread Naveen
Add a check for reserved values in the select field of the Namespace
Attachment command.
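
With this change only the two select values currently defined are
accepted: SEL=0h (controller attach) and SEL=1h (controller detach);
any other value now completes with Invalid Field in Command instead of
being treated as a detach. From a guest, the two valid cases map to,
for example (illustrative nvme-cli invocations):

nvme attach-ns /dev/nvme0 --namespace-id=1 --controllers=0
nvme detach-ns /dev/nvme0 --namespace-id=1 --controllers=0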

Signed-off-by: Naveen Nagar 
Signed-off-by: Klaus Jensen 
cc: Minwoo Im 

---
 hw/nvme/ctrl.c   | 13 +
 include/block/nvme.h |  5 +
 2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 6baf9e0..2c59c74 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -5191,7 +5191,7 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
 uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
 uint32_t nsid = le32_to_cpu(req->cmd.nsid);
 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
-bool attach = !(dw10 & 0xf);
+uint8_t sel = dw10 & 0xf;
uint16_t *nr_ids = &list[0];
uint16_t *ids = &list[1];
 uint16_t ret;
@@ -5224,7 +5224,8 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
 return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
 }
 
-if (attach) {
+switch (sel) {
+case NVME_NS_ATTACHMENT_ATTACH:
 if (nvme_ns(ctrl, nsid)) {
 return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
 }
@@ -5235,7 +5236,8 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
 
 nvme_attach_ns(ctrl, ns);
 nvme_select_iocs_ns(ctrl, ns);
-} else {
+break;
+case NVME_NS_ATTACHMENT_DETACH:
 if (!nvme_ns(ctrl, nsid)) {
 return NVME_NS_NOT_ATTACHED | NVME_DNR;
 }
@@ -5244,8 +5246,11 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
 ns->attached--;
 
 nvme_update_dmrsl(ctrl);
+break;
+default:
+return NVME_INVALID_FIELD | NVME_DNR;
 }
-
+
 /*
  * Add namespace id to the changed namespace id list for event clearing
  * via Get Log Page command.
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 77aae01..e3bd47b 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -1154,6 +1154,11 @@ enum NvmeIdCtrlCmic {
 NVME_CMIC_MULTI_CTRL= 1 << 1,
 };
 
+enum NvmeNsAttachmentOperation {
+NVME_NS_ATTACHMENT_ATTACH = 0x0,
+NVME_NS_ATTACHMENT_DETACH = 0x1,
+};
+
 #define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
 #define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf)
 #define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
-- 
1.8.3.1




[RFC PATCH v2] hw/nvme:Adding Support for namespace management

2021-08-19 Thread Naveen Nagar
From: Naveen 

This patch adds support for the namespace management create and delete
operations.

Since v1:
- Moved nvme_ns_identify_common into ns.c and modified it
- Added check for CSI field in NS management
- Indentation fix in namespace create

This patch has been tested with the command line below; the image files
used for unallocated namespaces are created with a size of 0 GB.
ns_create looks through the list of unallocated namespaces, initializes
one, and returns its NSID. A new mandatory field, tnvmcap, has been
added, and the total capacity of the namespaces created is not allowed
to exceed tnvmcap.

-device nvme-subsys,id=subsys0,tnvmcap=8
-device nvme,serial=foo,id=nvme0,subsys=subsys0
-device nvme,serial=bar,id=nvme1,subsys=subsys0
-drive id=ns1,file=ns1.img,if=none
-device nvme-ns,drive=ns1,bus=nvme0,nsid=1,zoned=false,shared=true
-drive id=ns2,file=ns2.img,if=none
-device nvme-ns,drive=ns2,bus=nvme0,nsid=2,zoned=false,shared=true
-drive id=ns3,file=ns3.img,if=none
-device nvme-ns,drive=ns3,bus=nvme0,nsid=3,zoned=false,shared=true
-drive id=ns4,file=ns4.img,if=none
-device nvme-ns,drive=ns4,bus=nvme0,nsid=4,zoned=false,shared=true

Please review and suggest if any changes are required.

Signed-off-by: Naveen Nagar 
Reviewed-by: Klaus Jensen 
  
---
 hw/nvme/ctrl.c   | 237 +--
 hw/nvme/ns.c |  61 ++-
 hw/nvme/nvme.h   |   7 +-
 hw/nvme/subsys.c |   3 +
 include/block/nvme.h |  18 +++-
 5 files changed, 285 insertions(+), 41 deletions(-)

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 6baf9e0420..992aaa7d02 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -219,6 +219,7 @@ static const uint32_t nvme_cse_acs[256] = {
 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
+[NVME_ADM_CMD_NS_MANAGEMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_NS_ATTACHMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_FORMAT_NVM]   = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
 };
@@ -4450,11 +4451,19 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
 NvmeNamespace *ns;
 NvmeIdentify *c = (NvmeIdentify *)>cmd;
 uint32_t nsid = le32_to_cpu(c->nsid);
+NvmeIdNs *id_ns = NULL;
+uint16_t ret;
 
 trace_pci_nvme_identify_ns(nsid);
 
-if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
+if (!nvme_nsid_valid(n, nsid)) {
 return NVME_INVALID_NSID | NVME_DNR;
+} else if (nsid == NVME_NSID_BROADCAST) {
+id_ns = g_new0(NvmeIdNs, 1);
+nvme_ns_identify_common(id_ns);
+ret = nvme_c2h(n, (uint8_t *)id_ns, sizeof(NvmeIdNs), req);
+g_free(id_ns);
+return ret;
 }
 
 ns = nvme_ns(n, nsid);
@@ -5184,6 +5193,200 @@ static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
 }
 }
 
+static int nvme_blk_truncate(BlockBackend *blk, size_t len, Error **errp)
+{
+int ret;
+uint64_t perm, shared_perm;
+
+blk_get_perm(blk, &perm, &shared_perm);
+
+ret = blk_set_perm(blk, perm | BLK_PERM_RESIZE, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_truncate(blk, len, false, PREALLOC_MODE_OFF, 0, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_set_perm(blk, perm, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+return 0;
+}
+
+static uint32_t nvme_allocate_nsid(NvmeCtrl *n)
+{
+uint32_t nsid = 0;
+for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+if (nvme_ns(n, i) || nvme_subsys_ns(n->subsys, i)) {
+continue;
+}
+
+nsid = i;
+return nsid;
+}
+return nsid;
+}
+
+static uint16_t nvme_namespace_create(NvmeCtrl *n, NvmeRequest *req)
+{
+uint32_t ret;
+NvmeIdNs id_ns_host;
+NvmeSubsystem *subsys = n->subsys;
+Error *err = NULL;
+uint8_t flbas_host;
+uint64_t ns_size;
+int lba_index;
+NvmeNamespace *ns;
+NvmeCtrl *ctrl;
+NvmeIdNs *id_ns;
+
+ret = nvme_h2c(n, (uint8_t *)&id_ns_host, sizeof(id_ns_host), req);
+if (ret) {
+return ret;
+}
+
+if (id_ns_host.ncap < id_ns_host.nsze) {
+return NVME_THIN_PROVISION_NO_SUPP | NVME_DNR;
+} else if (id_ns_host.ncap > id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (!id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (QSLIST_EMPTY(&subsys->unallocated_namespaces)) {
+return NVME_NS_ID_UNAVAILABLE;
+}
+
+ns = QSLIST_FIRST(&subsys->unallocated_namespaces);
+id_ns = &ns->id_ns;
+flbas_host = (id_ns_host.flbas) & (0xF);
+
+if (flbas_host > id_ns->nlbaf) {
+return NVME_INVALID_FORMAT | NVME_DNR;
+}
+
+ret = nvme_ns_setup(ns, &err);
+if (ret) {
+return ret;
+}
+
+   

[RFC PATCH v1] Adding Support for namespace management

2021-08-13 Thread Naveen
This patch adds support for the namespace management create and delete
operations. It has been tested with the command line below; the image
files used for unallocated namespaces are created with a size of 0 GB.
ns_create looks through the list of unallocated namespaces, initializes
one, and returns its NSID. A new mandatory field, tnvmcap, has been
added, and the total capacity of the namespaces created is not allowed
to exceed tnvmcap.

-device nvme-subsys,id=subsys0,tnvmcap=8
-device nvme,serial=foo,id=nvme0,subsys=subsys0
-device nvme,serial=bar,id=nvme1,subsys=subsys0

-drive id=ns1,file=ns1.img,if=none
-device nvme-ns,drive=ns1,bus=nvme0,nsid=1,zoned=false,shared=true
-drive id=ns2,file=ns2.img,if=none
-device nvme-ns,drive=ns2,bus=nvme0,nsid=2,zoned=false,shared=true
-drive id=ns3,file=ns3.img,if=none
-device nvme-ns,drive=ns3,bus=nvme0,nsid=3,zoned=false,shared=true
-drive id=ns4,file=ns4.img,if=none
-device nvme-ns,drive=ns4,bus=nvme0,nsid=4,zoned=false,shared=true

Please review and suggest if any changes are required.

Signed-off-by: Naveen Nagar 
Reviewed-by: Klaus Jensen 
Reviewed-by: Padmakar Kalghatgi 
Reviewed-by: Gollu Appalanaidu 
Reviewed-by: Jaegyu Choi 

---
 hw/nvme/ctrl.c   | 250 +--
 hw/nvme/ns.c |  14 ++-
 hw/nvme/nvme.h   |   6 +-
 hw/nvme/subsys.c |   3 +
 include/block/nvme.h |  18 +++-
 5 files changed, 272 insertions(+), 19 deletions(-)

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 6baf9e0420..4be23a3092 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -219,6 +219,7 @@ static const uint32_t nvme_cse_acs[256] = {
 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
+[NVME_ADM_CMD_NS_MANAGEMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_NS_ATTACHMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_FORMAT_NVM]   = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
 };
@@ -4445,6 +4446,30 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
 return nvme_c2h(n, id, sizeof(id), req);
 }
 
+static uint16_t nvme_identify_ns_common(NvmeCtrl *n, NvmeRequest *req)
+{
+NvmeIdNs id_ns = {};
+
+id_ns.nsfeat |= (0x4 | 0x10);
+id_ns.dpc = 0x1f;
+
+NvmeLBAF lbaf[16] = {
+[0] = {.ds = 9},
+[1] = {.ds = 9, .ms = 8},
+[2] = {.ds = 9, .ms = 16},
+[3] = {.ds = 9, .ms = 64},
+[4] = {.ds = 12},
+[5] = {.ds = 12, .ms = 8},
+[6] = {.ds = 12, .ms = 16},
+[7] = {.ds = 12, .ms = 64},
+};
+
+memcpy(&id_ns.lbaf, &lbaf, sizeof(lbaf));
+id_ns.nlbaf = 7;
+
+return nvme_c2h(n, (uint8_t *)&id_ns, sizeof(NvmeIdNs), req);
+}
+
 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
 {
 NvmeNamespace *ns;
@@ -4453,8 +4478,10 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
 
 trace_pci_nvme_identify_ns(nsid);
 
-if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
+if (!nvme_nsid_valid(n, nsid)) {
 return NVME_INVALID_NSID | NVME_DNR;
+} else if (nsid == NVME_NSID_BROADCAST) {
+return nvme_identify_ns_common(n, req);
 }
 
 ns = nvme_ns(n, nsid);
@@ -5184,6 +5211,195 @@ static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
 }
 }
 
+static int nvme_blk_truncate(BlockBackend *blk, size_t len, Error **errp)
+{
+int ret;
+uint64_t perm, shared_perm;
+
+blk_get_perm(blk, &perm, &shared_perm);
+
+ret = blk_set_perm(blk, perm | BLK_PERM_RESIZE, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_truncate(blk, len, false, PREALLOC_MODE_OFF, 0, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_set_perm(blk, perm, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+return 0;
+}
+
+static uint32_t nvme_allocate_nsid(NvmeCtrl *n)
+{
+uint32_t nsid = 0;
+for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+if (nvme_ns(n, i) || nvme_subsys_ns(n->subsys, i)) {
+continue;
+}
+
+nsid = i;
+return nsid;
+}
+return nsid;
+}
+
+static uint16_t nvme_namespace_create(NvmeCtrl *n, NvmeRequest *req)
+{
+   uint32_t ret;
+   NvmeIdNs id_ns_host;
+   NvmeSubsystem *subsys = n->subsys;
+   Error *err = NULL;
+   uint8_t flbas_host;
+   uint64_t ns_size;
+   int lba_index;
+   NvmeNamespace *ns;
+   NvmeCtrl *ctrl;
+   NvmeIdNs *id_ns;
+
+ret = nvme_h2c(n, (uint8_t *)&id_ns_host, sizeof(id_ns_host), req);
+if (ret) {
+return ret;
+}
+
+if (id_ns_host.ncap < id_ns_host.nsze) {
+return NVME_THIN_PROVISION_NO_SUPP | NVME_DNR;
+} else if (id_ns_host.ncap > id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (!id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;