Re: [PATCH 14/16] hw/block/nvme: consolidate qsg/iov clearing

2020-07-29 Thread Klaus Jensen
On Jul 29 21:18, Maxim Levitsky wrote:
> On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> > From: Klaus Jensen 
> > 
> > Always destroy the request qsg/iov at the end of request use.
> > 
> > Signed-off-by: Klaus Jensen 
> > ---
> >  hw/block/nvme.c | 48 +++++++++++++++++-------------------------------
> >  1 file changed, 17 insertions(+), 31 deletions(-)
> > 
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index 54cd20f1ce22..b53afdeb3fb6 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -213,6 +213,14 @@ static void nvme_req_clear(NvmeRequest *req)
> >  {
> >  req->ns = NULL;
> >  memset(&req->cqe, 0x0, sizeof(req->cqe));
> > +
> > +if (req->qsg.sg) {
> > +qemu_sglist_destroy(&req->qsg);
> > +}
> > +
> > +if (req->iov.iov) {
> > +qemu_iovec_destroy(&req->iov);
> > +}
> >  }
> >  
> >  static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
> > @@ -297,15 +305,14 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >  
> >  status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
> >  if (status) {
> > -goto unmap;
> > +return status;
> >  }
> >  
> >  len -= trans_len;
> >  if (len) {
> >  if (unlikely(!prp2)) {
> >  trace_pci_nvme_err_invalid_prp2_missing();
> > -status = NVME_INVALID_FIELD | NVME_DNR;
> > -goto unmap;
> > +return NVME_INVALID_FIELD | NVME_DNR;
> >  }
> >  
> >  if (len > n->page_size) {
> > @@ -326,13 +333,11 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >  if (i == n->max_prp_ents - 1 && len > n->page_size) {
> >  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> >  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> > -status = NVME_INVALID_FIELD | NVME_DNR;
> > -goto unmap;
> > +return NVME_INVALID_FIELD | NVME_DNR;
> >  }
> >  
> >  if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
> > -status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
> > -goto unmap;
> > +return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> >  }
> >  
> >  i = 0;
> > @@ -345,14 +350,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >  
> >  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> >  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> > -status = NVME_INVALID_FIELD | NVME_DNR;
> > -goto unmap;
> > +return NVME_INVALID_FIELD | NVME_DNR;
> >  }
> >  
> >  trans_len = MIN(len, n->page_size);
> >  status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
> >  if (status) {
> > -goto unmap;
> > +return status;
> >  }
> >  
> >  len -= trans_len;
> > @@ -361,27 +365,16 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >  } else {
> >  if (unlikely(prp2 & (n->page_size - 1))) {
> >  trace_pci_nvme_err_invalid_prp2_align(prp2);
> > -status = NVME_INVALID_FIELD | NVME_DNR;
> > -goto unmap;
> > +return NVME_INVALID_FIELD | NVME_DNR;
> >  }
> >  status = nvme_map_addr(n, qsg, iov, prp2, len);
> >  if (status) {
> > -goto unmap;
> > +return status;
> >  }
> >  }
> >  }
> > +
> >  return NVME_SUCCESS;
> > -
> > -unmap:
> > -if (iov && iov->iov) {
> > -qemu_iovec_destroy(iov);
> > -}
> > -
> > -if (qsg && qsg->sg) {
> > -qemu_sglist_destroy(qsg);
> > -}
> > -
> > -return status;
> >  }
> >  
> >  static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> > @@ -601,13 +594,6 @@ static void nvme_rw_cb(void *opaque, int ret)
> >  req->status = NVME_INTERNAL_DEV_ERROR;
> >  }
> >  
> > -if (req->qsg.nalloc) {
> > -qemu_sglist_destroy(&req->qsg);
> > -}
> > -if (req->iov.nalloc) {
> > -qemu_iovec_destroy(&req->iov);
> > -}
> > -
> >  nvme_enqueue_req_completion(cq, req);
> >  }
> >  
> 
> This and the former patch, I guess, answer my own question about why the
> request is cleared after its cqe got posted.
> 
> Looks reasonable.
> 

I ended up with a compromise. I keep the clearing as a "before-use" job,
but we don't want the qsg and iovs hanging around until the request gets
reused, so I'm adding an nvme_req_exit() to free that memory as soon as
the cqe has been posted.
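
Roughly, the split looks like this (a minimal sketch only; the destroy
conditions are taken from this patch, and the exact shape of
nvme_req_exit() and its call site may differ in the respin):

    /* free the mapping resources as soon as the cqe has been posted */
    static void nvme_req_exit(NvmeRequest *req)
    {
        if (req->qsg.sg) {
            qemu_sglist_destroy(&req->qsg);
        }

        if (req->iov.iov) {
            qemu_iovec_destroy(&req->iov);
        }
    }

    /* "before-use" reset, run when the request slot is picked up again */
    static void nvme_req_clear(NvmeRequest *req)
    {
        req->ns = NULL;
        memset(&req->cqe, 0x0, sizeof(req->cqe));
    }

with nvme_req_exit() invoked from the completion path once the cqe has
been posted, and nvme_req_clear() still run before the request is reused.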



Re: [PATCH 14/16] hw/block/nvme: consolidate qsg/iov clearing

2020-07-29 Thread Maxim Levitsky
On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Always destroy the request qsg/iov at the end of request use.
> 
> Signed-off-by: Klaus Jensen 
> ---
>  hw/block/nvme.c | 48 +++++++++++++++++-------------------------------
>  1 file changed, 17 insertions(+), 31 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 54cd20f1ce22..b53afdeb3fb6 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -213,6 +213,14 @@ static void nvme_req_clear(NvmeRequest *req)
>  {
>  req->ns = NULL;
>  memset(&req->cqe, 0x0, sizeof(req->cqe));
> +
> +if (req->qsg.sg) {
> +qemu_sglist_destroy(&req->qsg);
> +}
> +
> +if (req->iov.iov) {
> +qemu_iovec_destroy(&req->iov);
> +}
>  }
>  
>  static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
> @@ -297,15 +305,14 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
>  
>  status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
>  if (status) {
> -goto unmap;
> +return status;
>  }
>  
>  len -= trans_len;
>  if (len) {
>  if (unlikely(!prp2)) {
>  trace_pci_nvme_err_invalid_prp2_missing();
> -status = NVME_INVALID_FIELD | NVME_DNR;
> -goto unmap;
> +return NVME_INVALID_FIELD | NVME_DNR;
>  }
>  
>  if (len > n->page_size) {
> @@ -326,13 +333,11 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
>  if (i == n->max_prp_ents - 1 && len > n->page_size) {
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> -status = NVME_INVALID_FIELD | NVME_DNR;
> -goto unmap;
> +return NVME_INVALID_FIELD | NVME_DNR;
>  }
>  
>  if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
> -status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
> -goto unmap;
> +return NVME_INVALID_USE_OF_CMB | NVME_DNR;
>  }
>  
>  i = 0;
> @@ -345,14 +350,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
>  
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> -status = NVME_INVALID_FIELD | NVME_DNR;
> -goto unmap;
> +return NVME_INVALID_FIELD | NVME_DNR;
>  }
>  
>  trans_len = MIN(len, n->page_size);
>  status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
>  if (status) {
> -goto unmap;
> +return status;
>  }
>  
>  len -= trans_len;
> @@ -361,27 +365,16 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
>  } else {
>  if (unlikely(prp2 & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prp2_align(prp2);
> -status = NVME_INVALID_FIELD | NVME_DNR;
> -goto unmap;
> +return NVME_INVALID_FIELD | NVME_DNR;
>  }
>  status = nvme_map_addr(n, qsg, iov, prp2, len);
>  if (status) {
> -goto unmap;
> +return status;
>  }
>  }
>  }
> +
>  return NVME_SUCCESS;
> -
> -unmap:
> -if (iov && iov->iov) {
> -qemu_iovec_destroy(iov);
> -}
> -
> -if (qsg && qsg->sg) {
> -qemu_sglist_destroy(qsg);
> -}
> -
> -return status;
>  }
>  
>  static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> @@ -601,13 +594,6 @@ static void nvme_rw_cb(void *opaque, int ret)
>  req->status = NVME_INTERNAL_DEV_ERROR;
>  }
>  
> -if (req->qsg.nalloc) {
> -qemu_sglist_destroy(&req->qsg);
> -}
> -if (req->iov.nalloc) {
> -qemu_iovec_destroy(&req->iov);
> -}
> -
>  nvme_enqueue_req_completion(cq, req);
>  }
>  

This and the former patch, I guess, answer my own question about why the
request is cleared after its cqe got posted.

Looks reasonable.

Reviewed-by: Maxim Levitsky 

Best regards,
Maxim Levitsky




Re: [PATCH 14/16] hw/block/nvme: consolidate qsg/iov clearing

2020-07-29 Thread Minwoo Im
On 20-07-20 13:37:46, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Always destroy the request qsg/iov at the end of request use.
> 
> Signed-off-by: Klaus Jensen 
> ---
>  hw/block/nvme.c | 48 +++++++++++++++++-------------------------------
>  1 file changed, 17 insertions(+), 31 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 54cd20f1ce22..b53afdeb3fb6 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -213,6 +213,14 @@ static void nvme_req_clear(NvmeRequest *req)
>  {
>  req->ns = NULL;
>  memset(&req->cqe, 0x0, sizeof(req->cqe));
> +
> +if (req->qsg.sg) {
> +qemu_sglist_destroy(&req->qsg);
> +}
> +
> +if (req->iov.iov) {
> +qemu_iovec_destroy(&req->iov);
> +}

Oh okay.  This looks like an update for the previous patch in this series.
And I also agree on starting to focus on nvme_req_clear() for the
wrap-up.

Looks good to me.

Reviewed-by: Minwoo Im 



[PATCH 14/16] hw/block/nvme: consolidate qsg/iov clearing

2020-07-20 Thread Klaus Jensen
From: Klaus Jensen 

Always destroy the request qsg/iov at the end of request use.

Signed-off-by: Klaus Jensen 
---
 hw/block/nvme.c | 48 +++++++++++++++++-------------------------------
 1 file changed, 17 insertions(+), 31 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 54cd20f1ce22..b53afdeb3fb6 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -213,6 +213,14 @@ static void nvme_req_clear(NvmeRequest *req)
 {
 req->ns = NULL;
 memset(&req->cqe, 0x0, sizeof(req->cqe));
+
+if (req->qsg.sg) {
+qemu_sglist_destroy(&req->qsg);
+}
+
+if (req->iov.iov) {
+qemu_iovec_destroy(&req->iov);
+}
 }
 
 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
@@ -297,15 +305,14 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
 
 status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
 if (status) {
-goto unmap;
+return status;
 }
 
 len -= trans_len;
 if (len) {
 if (unlikely(!prp2)) {
 trace_pci_nvme_err_invalid_prp2_missing();
-status = NVME_INVALID_FIELD | NVME_DNR;
-goto unmap;
+return NVME_INVALID_FIELD | NVME_DNR;
 }
 
 if (len > n->page_size) {
@@ -326,13 +333,11 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
 if (i == n->max_prp_ents - 1 && len > n->page_size) {
 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
 trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
-status = NVME_INVALID_FIELD | NVME_DNR;
-goto unmap;
+return NVME_INVALID_FIELD | NVME_DNR;
 }
 
 if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
-status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
-goto unmap;
+return NVME_INVALID_USE_OF_CMB | NVME_DNR;
 }
 
 i = 0;
@@ -345,14 +350,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
 
 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
 trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
-status = NVME_INVALID_FIELD | NVME_DNR;
-goto unmap;
+return NVME_INVALID_FIELD | NVME_DNR;
 }
 
 trans_len = MIN(len, n->page_size);
 status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
 if (status) {
-goto unmap;
+return status;
 }
 
 len -= trans_len;
@@ -361,27 +365,16 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
 } else {
 if (unlikely(prp2 & (n->page_size - 1))) {
 trace_pci_nvme_err_invalid_prp2_align(prp2);
-status = NVME_INVALID_FIELD | NVME_DNR;
-goto unmap;
+return NVME_INVALID_FIELD | NVME_DNR;
 }
 status = nvme_map_addr(n, qsg, iov, prp2, len);
 if (status) {
-goto unmap;
+return status;
 }
 }
 }
+
 return NVME_SUCCESS;
-
-unmap:
-if (iov && iov->iov) {
-qemu_iovec_destroy(iov);
-}
-
-if (qsg && qsg->sg) {
-qemu_sglist_destroy(qsg);
-}
-
-return status;
 }
 
 static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
@@ -601,13 +594,6 @@ static void nvme_rw_cb(void *opaque, int ret)
 req->status = NVME_INTERNAL_DEV_ERROR;
 }
 
-if (req->qsg.nalloc) {
-qemu_sglist_destroy(&req->qsg);
-}
-if (req->iov.nalloc) {
-qemu_iovec_destroy(&req->iov);
-}
-
 nvme_enqueue_req_completion(cq, req);
 }
 
-- 
2.27.0