Re: [PATCH 05/16] hw/block/nvme: refactor dma read/write

2020-07-29 Thread Klaus Jensen
On Jul 29 20:35, Maxim Levitsky wrote:
> On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> > From: Klaus Jensen 
> > 
> > Refactor the nvme_dma_{read,write}_prp functions into a common function
> > taking a DMADirection parameter.
> > 
> > Signed-off-by: Klaus Jensen 
> > Reviewed-by: Maxim Levitsky 
> > ---
> >  hw/block/nvme.c | 88 -
> >  1 file changed, 43 insertions(+), 45 deletions(-)
> > 
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index 6a1a1626b87b..d314a604db81 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -361,55 +361,50 @@ unmap:
> >      return status;
> >  }
> >  
> > -static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> > -                                   uint64_t prp1, uint64_t prp2)
> > +static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> > +                             uint64_t prp1, uint64_t prp2, DMADirection dir)
> >  {
> >      QEMUSGList qsg;
> >      QEMUIOVector iov;
> >      uint16_t status = NVME_SUCCESS;
> >  
> > -    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> > -        return NVME_INVALID_FIELD | NVME_DNR;
> > +    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
> > +    if (status) {
> > +        return status;
> >      }
> > +
> >      if (qsg.nsg > 0) {
> > -        if (dma_buf_write(ptr, len, &qsg)) {
> > -            status = NVME_INVALID_FIELD | NVME_DNR;
> > +        uint64_t residual;
> > +
> > +        if (dir == DMA_DIRECTION_TO_DEVICE) {
> > +            residual = dma_buf_write(ptr, len, &qsg);
> > +        } else {
> > +            residual = dma_buf_read(ptr, len, &qsg);
> >          }
> > -        qemu_sglist_destroy(&qsg);
> > -    } else {
> > -        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
> > -            status = NVME_INVALID_FIELD | NVME_DNR;
> > -        }
> > -        qemu_iovec_destroy(&iov);
> > -    }
> > -    return status;
> > -}
> >  
> > -static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> > -                                  uint64_t prp1, uint64_t prp2)
> > -{
> > -    QEMUSGList qsg;
> > -    QEMUIOVector iov;
> > -    uint16_t status = NVME_SUCCESS;
> > -
> > -    trace_pci_nvme_dma_read(prp1, prp2);
> > -
> > -    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> > -        return NVME_INVALID_FIELD | NVME_DNR;
> > -    }
> > -    if (qsg.nsg > 0) {
> > -        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
> > +        if (unlikely(residual)) {
> >              trace_pci_nvme_err_invalid_dma();
> >              status = NVME_INVALID_FIELD | NVME_DNR;
> >          }
> > +
> >          qemu_sglist_destroy(&qsg);
> >      } else {
> > -        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
> > +        size_t bytes;
> > +
> > +        if (dir == DMA_DIRECTION_TO_DEVICE) {
> > +            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
> > +        } else {
> > +            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
> > +        }
> > +
> > +        if (unlikely(bytes != len)) {
> >              trace_pci_nvme_err_invalid_dma();
> >              status = NVME_INVALID_FIELD | NVME_DNR;
> >          }
> > +
> >          qemu_iovec_destroy(&iov);
> >      }
> > +
> I know I reviewed this, but thinking about it now, why not add an assert
> here that we don't have both iov and qsg populated with data?
> 

Good point. I added it after the nvme_map_prp call.
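
Something along these lines (just a sketch, assuming nvme_map_prp always
leaves exactly one of qsg/iov populated):

    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
    if (status) {
        return status;
    }

    /* sketch: the mapping should fill in either the QEMUSGList or the
     * QEMUIOVector, never both */
    assert((qsg.nsg > 0) != (iov.niov > 0));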



Re: [PATCH 05/16] hw/block/nvme: refactor dma read/write

2020-07-29 Thread Maxim Levitsky
On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Refactor the nvme_dma_{read,write}_prp functions into a common function
> taking a DMADirection parameter.
> 
> Signed-off-by: Klaus Jensen 
> Reviewed-by: Maxim Levitsky 
> ---
>  hw/block/nvme.c | 88 -
>  1 file changed, 43 insertions(+), 45 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 6a1a1626b87b..d314a604db81 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -361,55 +361,50 @@ unmap:
>      return status;
>  }
>  
> -static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> -                                   uint64_t prp1, uint64_t prp2)
> +static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> +                             uint64_t prp1, uint64_t prp2, DMADirection dir)
>  {
>      QEMUSGList qsg;
>      QEMUIOVector iov;
>      uint16_t status = NVME_SUCCESS;
>  
> -    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> -        return NVME_INVALID_FIELD | NVME_DNR;
> +    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
> +    if (status) {
> +        return status;
>      }
> +
>      if (qsg.nsg > 0) {
> -        if (dma_buf_write(ptr, len, &qsg)) {
> -            status = NVME_INVALID_FIELD | NVME_DNR;
> +        uint64_t residual;
> +
> +        if (dir == DMA_DIRECTION_TO_DEVICE) {
> +            residual = dma_buf_write(ptr, len, &qsg);
> +        } else {
> +            residual = dma_buf_read(ptr, len, &qsg);
>          }
> -        qemu_sglist_destroy(&qsg);
> -    } else {
> -        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
> -            status = NVME_INVALID_FIELD | NVME_DNR;
> -        }
> -        qemu_iovec_destroy(&iov);
> -    }
> -    return status;
> -}
>  
> -static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> -                                  uint64_t prp1, uint64_t prp2)
> -{
> -    QEMUSGList qsg;
> -    QEMUIOVector iov;
> -    uint16_t status = NVME_SUCCESS;
> -
> -    trace_pci_nvme_dma_read(prp1, prp2);
> -
> -    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> -        return NVME_INVALID_FIELD | NVME_DNR;
> -    }
> -    if (qsg.nsg > 0) {
> -        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
> +        if (unlikely(residual)) {
>              trace_pci_nvme_err_invalid_dma();
>              status = NVME_INVALID_FIELD | NVME_DNR;
>          }
> +
>          qemu_sglist_destroy(&qsg);
>      } else {
> -        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
> +        size_t bytes;
> +
> +        if (dir == DMA_DIRECTION_TO_DEVICE) {
> +            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
> +        } else {
> +            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
> +        }
> +
> +        if (unlikely(bytes != len)) {
>              trace_pci_nvme_err_invalid_dma();
>              status = NVME_INVALID_FIELD | NVME_DNR;
>          }
> +
>          qemu_iovec_destroy(&iov);
>      }
> +
I know I reviewed this, but thinking about it now, why not add an assert
here that we don't have both iov and qsg populated with data?

Best regards,
Maxim Levitsky

>      return status;
>  }
>  
> @@ -840,8 +835,8 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
>          nvme_clear_events(n, NVME_AER_TYPE_SMART);
>      }
>  
> -    return nvme_dma_read_prp(n, (uint8_t *) &smart + off, trans_len, prp1,
> -                             prp2);
> +    return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
> +                        DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
> @@ -862,8 +857,8 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
>  
>      trans_len = MIN(sizeof(fw_log) - off, buf_len);
>  
> -    return nvme_dma_read_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1,
> -                             prp2);
> +    return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
> +                        DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
> @@ -887,7 +882,8 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
>  
>      trans_len = MIN(sizeof(errlog) - off, buf_len);
>  
> -    return nvme_dma_read_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2);
> +    return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
> +                        DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> @@ -1042,8 +1038,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
>  
>      trace_pci_nvme_identify_ctrl();
>  
> -    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
> -                             prp1, prp2);
> +    return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
> +                        prp2, DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t 

Re: [PATCH 05/16] hw/block/nvme: refactor dma read/write

2020-07-29 Thread Minwoo Im
Klaus,

On 20-07-20 13:37:37, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Refactor the nvme_dma_{read,write}_prp functions into a common function
> taking a DMADirection parameter.
> 
> Signed-off-by: Klaus Jensen 
> Reviewed-by: Maxim Levitsky 

Reviewed-by: Minwoo Im 

Thanks,



[PATCH 05/16] hw/block/nvme: refactor dma read/write

2020-07-20 Thread Klaus Jensen
From: Klaus Jensen 

Refactor the nvme_dma_{read,write}_prp functions into a common function
taking a DMADirection parameter.

Signed-off-by: Klaus Jensen 
Reviewed-by: Maxim Levitsky 
---
 hw/block/nvme.c | 88 -
 1 file changed, 43 insertions(+), 45 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 6a1a1626b87b..d314a604db81 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -361,55 +361,50 @@ unmap:
     return status;
 }
 
-static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
-                                   uint64_t prp1, uint64_t prp2)
+static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+                             uint64_t prp1, uint64_t prp2, DMADirection dir)
 {
     QEMUSGList qsg;
     QEMUIOVector iov;
     uint16_t status = NVME_SUCCESS;
 
-    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
-        return NVME_INVALID_FIELD | NVME_DNR;
+    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
+    if (status) {
+        return status;
     }
+
     if (qsg.nsg > 0) {
-        if (dma_buf_write(ptr, len, &qsg)) {
-            status = NVME_INVALID_FIELD | NVME_DNR;
+        uint64_t residual;
+
+        if (dir == DMA_DIRECTION_TO_DEVICE) {
+            residual = dma_buf_write(ptr, len, &qsg);
+        } else {
+            residual = dma_buf_read(ptr, len, &qsg);
         }
-        qemu_sglist_destroy(&qsg);
-    } else {
-        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
-            status = NVME_INVALID_FIELD | NVME_DNR;
-        }
-        qemu_iovec_destroy(&iov);
-    }
-    return status;
-}
 
-static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
-                                  uint64_t prp1, uint64_t prp2)
-{
-    QEMUSGList qsg;
-    QEMUIOVector iov;
-    uint16_t status = NVME_SUCCESS;
-
-    trace_pci_nvme_dma_read(prp1, prp2);
-
-    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
-        return NVME_INVALID_FIELD | NVME_DNR;
-    }
-    if (qsg.nsg > 0) {
-        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
+        if (unlikely(residual)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
+
         qemu_sglist_destroy(&qsg);
     } else {
-        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
+        size_t bytes;
+
+        if (dir == DMA_DIRECTION_TO_DEVICE) {
+            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
+        } else {
+            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
+        }
+
+        if (unlikely(bytes != len)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
+
         qemu_iovec_destroy(&iov);
     }
+
     return status;
 }
 
@@ -840,8 +835,8 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
         nvme_clear_events(n, NVME_AER_TYPE_SMART);
     }
 
-    return nvme_dma_read_prp(n, (uint8_t *) &smart + off, trans_len, prp1,
-                             prp2);
+    return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
+                        DMA_DIRECTION_FROM_DEVICE);
 }
 
 static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
@@ -862,8 +857,8 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
 
     trans_len = MIN(sizeof(fw_log) - off, buf_len);
 
-    return nvme_dma_read_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1,
-                             prp2);
+    return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
+                        DMA_DIRECTION_FROM_DEVICE);
 }
 
 static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
@@ -887,7 +882,8 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
 
     trans_len = MIN(sizeof(errlog) - off, buf_len);
 
-    return nvme_dma_read_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2);
+    return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
+                        DMA_DIRECTION_FROM_DEVICE);
 }
 
 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
@@ -1042,8 +1038,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
 
     trace_pci_nvme_identify_ctrl();
 
-    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
-                             prp1, prp2);
+    return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
+                        prp2, DMA_DIRECTION_FROM_DEVICE);
 }
 
 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
@@ -1062,8 +1058,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
 
     ns = &n->namespaces[nsid - 1];
 
-    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
-                             prp1, prp2);
+    return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
+                        prp2, DMA_DIRECTION_FROM_DEVICE);
 }
 
 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
@@ -1098,7 +1094,8 @@ static uint16_t