Re: [PATCH v2] hw/block/nvme: add compare command

2020-12-08 Thread Keith Busch
On Thu, Nov 26, 2020 at 07:56:05PM +0100, Klaus Jensen wrote:
> From: Gollu Appalanaidu 
> 
> Add the Compare command.
> 
> This implementation uses a bounce buffer to read the data from
> storage and then compares it with the host-supplied buffer.
> 
> Signed-off-by: Gollu Appalanaidu 
> [k.jensen: rebased]
> Signed-off-by: Klaus Jensen 

Looks good.

Reviewed-by: Keith Busch 



Re: [PATCH v2] hw/block/nvme: add compare command

2020-12-08 Thread Klaus Jensen
On Nov 27 07:21, Minwoo Im wrote:
> Hello,
> 
> On Fri, Nov 27, 2020 at 3:56 AM Klaus Jensen  wrote:
> >
> > From: Gollu Appalanaidu 
> >
> > Add the Compare command.
> >
> > This implementation uses a bounce buffer to read the data from
> > storage and then compares it with the host-supplied buffer.
> >
> > Signed-off-by: Gollu Appalanaidu 
> > [k.jensen: rebased]
> > Signed-off-by: Klaus Jensen 
> 
> 
> Reviewed-by: Minwoo Im 
> 

Thanks, applied to nvme-next.




Re: [PATCH v2] hw/block/nvme: add compare command

2020-11-26 Thread Minwoo Im
Hello,

On Fri, Nov 27, 2020 at 3:56 AM Klaus Jensen  wrote:
>
> From: Gollu Appalanaidu 
>
> Add the Compare command.
>
> This implementation uses a bounce buffer to read the data from
> storage and then compares it with the host-supplied buffer.
>
> Signed-off-by: Gollu Appalanaidu 
> [k.jensen: rebased]
> Signed-off-by: Klaus Jensen 


Reviewed-by: Minwoo Im 



[PATCH v2] hw/block/nvme: add compare command

2020-11-26 Thread Klaus Jensen
From: Gollu Appalanaidu 

Add the Compare command.

This implementation uses a bounce buffer to read the data from
storage and then compares it with the host-supplied buffer.

Signed-off-by: Gollu Appalanaidu 
[k.jensen: rebased]
Signed-off-by: Klaus Jensen 
---

Notes:
v2: save status in request on error (Minwoo)

 hw/block/nvme.c   | 101 +-
 hw/block/trace-events |   2 +
 2 files changed, 102 insertions(+), 1 deletion(-)
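
For reviewers who want the semantics at a glance, here is a minimal,
self-contained sketch (plain C, not QEMU code) of what Compare does:
read the LBA range into a bounce buffer, then memcmp against the data
the host supplied. The names do_compare, backing and host_buf are
illustrative only; in the patch proper the read goes through
blk_aio_preadv() and the host buffer is gathered with nvme_dma() in the
completion callback.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NVME_SUCCESS     0x0000
#define NVME_CMP_FAILURE 0x0285  /* Compare Failure: SCT 2h, SC 85h per the NVMe spec */

static uint16_t do_compare(const uint8_t *backing, size_t lba_size,
                           uint64_t slba, uint32_t nlb,
                           const uint8_t *host_buf)
{
    size_t len = (size_t)nlb * lba_size;   /* what nvme_l2b() computes */
    uint8_t *bounce = malloc(len);         /* bounce buffer for the read */
    uint16_t status = NVME_SUCCESS;

    /* stands in for the asynchronous blk_aio_preadv() read */
    memcpy(bounce, backing + slba * lba_size, len);

    if (memcmp(host_buf, bounce, len)) {
        status = NVME_CMP_FAILURE;
    }

    free(bounce);
    return status;
}

int main(void)
{
    uint8_t disk[1024] = { 0xaa };   /* first byte 0xaa, rest zero */
    uint8_t host[512]  = { 0xaa };

    printf("match:    0x%04x\n", do_compare(disk, 512, 0, 1, host));
    host[0] = 0xbb;
    printf("mismatch: 0x%04x\n", do_compare(disk, 512, 0, 1, host));
    return 0;
}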

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index f7f888402b06..8814201364c1 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -999,6 +999,51 @@ static void nvme_aio_discard_cb(void *opaque, int ret)
     nvme_enqueue_req_completion(nvme_cq(req), req);
 }
 
+struct nvme_compare_ctx {
+    QEMUIOVector iov;
+    uint8_t *bounce;
+    size_t len;
+};
+
+static void nvme_compare_cb(void *opaque, int ret)
+{
+    NvmeRequest *req = opaque;
+    NvmeNamespace *ns = req->ns;
+    struct nvme_compare_ctx *ctx = req->opaque;
+    g_autofree uint8_t *buf = NULL;
+    uint16_t status;
+
+    trace_pci_nvme_compare_cb(nvme_cid(req));
+
+    if (!ret) {
+        block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
+    } else {
+        block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
+        nvme_aio_err(req, ret);
+        goto out;
+    }
+
+    buf = g_malloc(ctx->len);
+
+    status = nvme_dma(nvme_ctrl(req), buf, ctx->len, DMA_DIRECTION_TO_DEVICE,
+                      req);
+    if (status) {
+        req->status = status;
+        goto out;
+    }
+
+    if (memcmp(buf, ctx->bounce, ctx->len)) {
+        req->status = NVME_CMP_FAILURE;
+    }
+
+out:
+    qemu_iovec_destroy(&ctx->iov);
+    g_free(ctx->bounce);
+    g_free(ctx);
+
+    nvme_enqueue_req_completion(nvme_cq(req), req);
+}
+
 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
 {
     NvmeNamespace *ns = req->ns;
@@ -1072,6 +1117,57 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
     return status;
 }
 
+static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
+{
+    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+    NvmeNamespace *ns = req->ns;
+    BlockBackend *blk = ns->blkconf.blk;
+    uint64_t slba = le64_to_cpu(rw->slba);
+    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
+    size_t len = nvme_l2b(ns, nlb);
+    int64_t offset = nvme_l2b(ns, slba);
+    uint8_t *bounce = NULL;
+    struct nvme_compare_ctx *ctx = NULL;
+    uint16_t status;
+
+    trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb);
+
+    status = nvme_check_mdts(n, len);
+    if (status) {
+        trace_pci_nvme_err_mdts(nvme_cid(req), len);
+        return status;
+    }
+
+    status = nvme_check_bounds(ns, slba, nlb);
+    if (status) {
+        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
+        return status;
+    }
+
+    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
+        status = nvme_check_dulbe(ns, slba, nlb);
+        if (status) {
+            return status;
+        }
+    }
+
+    bounce = g_malloc(len);
+
+    ctx = g_new(struct nvme_compare_ctx, 1);
+    ctx->bounce = bounce;
+    ctx->len = len;
+
+    req->opaque = ctx;
+
+    qemu_iovec_init(&ctx->iov, 1);
+    qemu_iovec_add(&ctx->iov, bounce, len);
+
+    block_acct_start(blk_get_stats(blk), &req->acct, len, BLOCK_ACCT_READ);
+    blk_aio_preadv(blk, offset, &ctx->iov, 0, nvme_compare_cb, req);
+
+    return NVME_NO_COMPLETE;
+}
+
 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
 {
     block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
@@ -1201,6 +1297,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
     case NVME_CMD_WRITE:
     case NVME_CMD_READ:
         return nvme_rw(n, req);
+    case NVME_CMD_COMPARE:
+        return nvme_compare(n, req);
     case NVME_CMD_DSM:
         return nvme_dsm(n, req);
     default:
@@ -2927,7 +3025,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
     id->cqes = (0x4 << 4) | 0x4;
     id->nn = cpu_to_le32(n->num_namespaces);
     id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
-                           NVME_ONCS_FEATURES | NVME_ONCS_DSM);
+                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
+                           NVME_ONCS_COMPARE);
 
     id->vwc = 0x1;
     id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
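
With NVME_ONCS_COMPARE advertised, a host can discover support from the
Identify Controller data. As a hypothetical host-side illustration (not
part of this patch; per the NVMe spec, ONCS is the 16-bit little-endian
field at bytes 521:520 of the Identify Controller structure, and bit 0
indicates Compare):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical host-side probe: id_ctrl points at the 4096-byte Identify
 * Controller data.  ONCS lives at bytes 521:520 (little endian); bit 0
 * indicates Compare support. */
static bool ctrl_supports_compare(const uint8_t *id_ctrl)
{
    uint16_t oncs = (uint16_t)id_ctrl[520] | ((uint16_t)id_ctrl[521] << 8);
    return oncs & 0x1;
}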
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 1ffe0b3f76b5..68a4c8ed35e0 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -46,6 +46,8 @@ pci_nvme_write_zeroes(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb)
 pci_nvme_block_status(int64_t offset, int64_t bytes, int64_t pnum, int ret, bool zeroed) "offset %"PRId64" bytes %"PRId64" pnum %"PRId64" ret 0x%x zeroed %d"
 pci_nvme_dsm(uint16_t cid, uint32_t nsid, uint32_t nr, uint32_t attr) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu32" attr 0x%"PRIx32""
 pci_nvme_dsm_deallocate(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid