Re: [PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-29 Thread Klaus Jensen
On Jul 29 14:51, Andrzej Jakowski wrote:
> On 7/29/20 2:24 PM, Klaus Jensen wrote:
> > On Jul 29 13:40, Andrzej Jakowski wrote:
> >> On 7/20/20 4:37 AM, Klaus Jensen wrote:
> >>> From: Klaus Jensen 
> >>>
> >>> Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> >>> use them in nvme_map_prp.
> >>>
> >>> This fixes a bug where in the case of a CMB transfer, the device would
> >>> map to the buffer with a wrong length.
> >>>
> >>> Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in 
> >>> CMBs.")
> >>> Signed-off-by: Klaus Jensen 
> >>> ---
> >>>  hw/block/nvme.c   | 109 +++---
> >>>  hw/block/trace-events |   2 +
> >>>  2 files changed, 94 insertions(+), 17 deletions(-)
> >>>
> >>> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> >>> index 4d7b730a62b6..9b1a080cdc70 100644
> >>> --- a/hw/block/nvme.c
> >>> +++ b/hw/block/nvme.c
> >>> @@ -270,20 +338,27 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> >>> QEMUIOVector *iov, uint64_t prp1,
> >>>  } else {
> >>>  if (unlikely(prp2 & (n->page_size - 1))) {
> >>>  trace_pci_nvme_err_invalid_prp2_align(prp2);
> >>> +status = NVME_INVALID_FIELD | NVME_DNR;
> >>>  goto unmap;
> >>>  }
> >>> -if (qsg->nsg) {
> >>> -qemu_sglist_add(qsg, prp2, len);
> >>> -} else {
> >>> -qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - 
> >>> n->ctrl_mem.addr], trans_len);
> >>> +status = nvme_map_addr(n, qsg, iov, prp2, len);
> >>> +if (status) {
> >>> +goto unmap;
> >>>  }
> >>>  }
> >>>  }
> >>>  return NVME_SUCCESS;
> >>>  
> >>> - unmap:
> >>> -qemu_sglist_destroy(qsg);
> >>> -return NVME_INVALID_FIELD | NVME_DNR;
> >>> +unmap:
> >>> +if (iov && iov->iov) {
> >>> +qemu_iovec_destroy(iov);
> >>> +}
> >>> +
> >>> +if (qsg && qsg->sg) {
> >>> +qemu_sglist_destroy(qsg);
> >>> +}
> >>> +
> >>> +return status;
> >>
> >> I think it would make sense to move the whole unmap block to a separate
> >> function. That function could be called from here and after completing
> >> I/O, and would contain a unified deinitialization block - so no code
> >> repetition would be necessary.
> >> Other than that it looks good to me. Thx!
> >>
> >> Reviewed-by: Andrzej Jakowski 
> >>
> >  
> > Hi Andrzej,
> > 
> > Thanks for the review :)
> > 
> > Yes, this is done in a later patch ("hw/block/nvme: consolidate qsg/iov
> > clearing"), but kept here to reduce churn.
> > 
> Yep, noticed that after sending email :)
> Do you plan to submit a second version of these patches incorporating some
> of the feedback?
> 

Yes, so you can defer your reviews for v2 ;)



Re: [PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-29 Thread Andrzej Jakowski
On 7/29/20 2:24 PM, Klaus Jensen wrote:
> On Jul 29 13:40, Andrzej Jakowski wrote:
>> On 7/20/20 4:37 AM, Klaus Jensen wrote:
>>> From: Klaus Jensen 
>>>
>>> Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
>>> use them in nvme_map_prp.
>>>
>>> This fixes a bug where in the case of a CMB transfer, the device would
>>> map to the buffer with a wrong length.
>>>
>>> Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in 
>>> CMBs.")
>>> Signed-off-by: Klaus Jensen 
>>> ---
>>>  hw/block/nvme.c   | 109 +++---
>>>  hw/block/trace-events |   2 +
>>>  2 files changed, 94 insertions(+), 17 deletions(-)
>>>
>>> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
>>> index 4d7b730a62b6..9b1a080cdc70 100644
>>> --- a/hw/block/nvme.c
>>> +++ b/hw/block/nvme.c
>>> @@ -270,20 +338,27 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
>>> QEMUIOVector *iov, uint64_t prp1,
>>>  } else {
>>>  if (unlikely(prp2 & (n->page_size - 1))) {
>>>  trace_pci_nvme_err_invalid_prp2_align(prp2);
>>> +status = NVME_INVALID_FIELD | NVME_DNR;
>>>  goto unmap;
>>>  }
>>> -if (qsg->nsg) {
>>> -qemu_sglist_add(qsg, prp2, len);
>>> -} else {
>>> -qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - 
>>> n->ctrl_mem.addr], trans_len);
>>> +status = nvme_map_addr(n, qsg, iov, prp2, len);
>>> +if (status) {
>>> +goto unmap;
>>>  }
>>>  }
>>>  }
>>>  return NVME_SUCCESS;
>>>  
>>> - unmap:
>>> -qemu_sglist_destroy(qsg);
>>> -return NVME_INVALID_FIELD | NVME_DNR;
>>> +unmap:
>>> +if (iov && iov->iov) {
>>> +qemu_iovec_destroy(iov);
>>> +}
>>> +
>>> +if (qsg && qsg->sg) {
>>> +qemu_sglist_destroy(qsg);
>>> +}
>>> +
>>> +return status;
>>
>> I think it would make sense to move the whole unmap block to a separate
>> function. That function could be called from here and after completing
>> I/O, and would contain a unified deinitialization block - so no code
>> repetition would be necessary.
>> Other than that it looks good to me. Thx!
>>
>> Reviewed-by: Andrzej Jakowski 
>>
>  
> Hi Andrzej,
> 
> Thanks for the review :)
> 
> Yes, this is done in a later patch ("hw/block/nvme: consolidate qsg/iov
> clearing"), but kept here to reduce churn.
> 
Yep, noticed that after sending email :)
Do you plan to submit a second version of these patches incorporating some
of the feedback?




Re: [PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-29 Thread Klaus Jensen
On Jul 29 13:40, Andrzej Jakowski wrote:
> On 7/20/20 4:37 AM, Klaus Jensen wrote:
> > From: Klaus Jensen 
> > 
> > Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> > use them in nvme_map_prp.
> > 
> > This fixes a bug where in the case of a CMB transfer, the device would
> > map to the buffer with a wrong length.
> > 
> > Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in 
> > CMBs.")
> > Signed-off-by: Klaus Jensen 
> > ---
> >  hw/block/nvme.c   | 109 +++---
> >  hw/block/trace-events |   2 +
> >  2 files changed, 94 insertions(+), 17 deletions(-)
> > 
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index 4d7b730a62b6..9b1a080cdc70 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -270,20 +338,27 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> > QEMUIOVector *iov, uint64_t prp1,
> >  } else {
> >  if (unlikely(prp2 & (n->page_size - 1))) {
> >  trace_pci_nvme_err_invalid_prp2_align(prp2);
> > +status = NVME_INVALID_FIELD | NVME_DNR;
> >  goto unmap;
> >  }
> > -if (qsg->nsg) {
> > -qemu_sglist_add(qsg, prp2, len);
> > -} else {
> > -qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - 
> > n->ctrl_mem.addr], trans_len);
> > +status = nvme_map_addr(n, qsg, iov, prp2, len);
> > +if (status) {
> > +goto unmap;
> >  }
> >  }
> >  }
> >  return NVME_SUCCESS;
> >  
> > - unmap:
> > -qemu_sglist_destroy(qsg);
> > -return NVME_INVALID_FIELD | NVME_DNR;
> > +unmap:
> > +if (iov && iov->iov) {
> > +qemu_iovec_destroy(iov);
> > +}
> > +
> > +if (qsg && qsg->sg) {
> > +qemu_sglist_destroy(qsg);
> > +}
> > +
> > +return status;
> 
> I think it would make sense to move the whole unmap block to a separate
> function. That function could be called from here and after completing
> I/O, and would contain a unified deinitialization block - so no code
> repetition would be necessary.
> Other than that it looks good to me. Thx!
> 
> Reviewed-by: Andrzej Jakowski 
> 
 
Hi Andrzej,

Thanks for the review :)

Yes, this is done in a later patch ("hw/block/nvme: consolidate qsg/iov
clearing"), but kept here to reduce churn.



Re: [PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-29 Thread Andrzej Jakowski
On 7/20/20 4:37 AM, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> use them in nvme_map_prp.
> 
> This fixes a bug where in the case of a CMB transfer, the device would
> map to the buffer with a wrong length.
> 
> Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in 
> CMBs.")
> Signed-off-by: Klaus Jensen 
> ---
>  hw/block/nvme.c   | 109 +++---
>  hw/block/trace-events |   2 +
>  2 files changed, 94 insertions(+), 17 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 4d7b730a62b6..9b1a080cdc70 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -109,6 +109,11 @@ static uint16_t nvme_sqid(NvmeRequest *req)
>  return le16_to_cpu(req->sq->sqid);
>  }
>  
> +static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
> +{
> +return &n->cmbuf[addr - n->ctrl_mem.addr];
> +}
> +
>  static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
>  {
>  hwaddr low = n->ctrl_mem.addr;
> @@ -120,7 +125,7 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
>  static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
>  {
>  if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
> -memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
> +memcpy(buf, nvme_addr_to_cmb(n, addr), size);
>  return;
>  }
>  
> @@ -203,29 +208,91 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue 
> *cq)
>  }
>  }
>  
> +static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr 
> addr,
> +  size_t len)
> +{
> +if (!len) {
> +return NVME_SUCCESS;
> +}
> +
> +trace_pci_nvme_map_addr_cmb(addr, len);
> +
> +if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
> +return NVME_DATA_TRAS_ERROR;
> +}
> +
> +qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
> +
> +return NVME_SUCCESS;
> +}
> +
> +static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector 
> *iov,
> +  hwaddr addr, size_t len)
> +{
> +if (!len) {
> +return NVME_SUCCESS;
> +}
> +
> +trace_pci_nvme_map_addr(addr, len);
> +
> +if (nvme_addr_is_cmb(n, addr)) {
> +if (qsg && qsg->sg) {
> +return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> +}
> +
> +assert(iov);
> +
> +if (!iov->iov) {
> +qemu_iovec_init(iov, 1);
> +}
> +
> +return nvme_map_addr_cmb(n, iov, addr, len);
> +}
> +
> +if (iov && iov->iov) {
> +return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> +}
> +
> +assert(qsg);
> +
> +if (!qsg->sg) {
> +pci_dma_sglist_init(qsg, &n->parent_obj, 1);
> +}
> +
> +qemu_sglist_add(qsg, addr, len);
> +
> +return NVME_SUCCESS;
> +}
> +
>  static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t 
> prp1,
>   uint64_t prp2, uint32_t len, NvmeCtrl *n)
>  {
>  hwaddr trans_len = n->page_size - (prp1 % n->page_size);
>  trans_len = MIN(len, trans_len);
>  int num_prps = (len >> n->page_bits) + 1;
> +uint16_t status;
>  
>  if (unlikely(!prp1)) {
>  trace_pci_nvme_err_invalid_prp();
>  return NVME_INVALID_FIELD | NVME_DNR;
> -} else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
> -   prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
> -qsg->nsg = 0;
> +}
> +
> +if (nvme_addr_is_cmb(n, prp1)) {
>  qemu_iovec_init(iov, num_prps);
> -qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], 
> trans_len);
>  } else {
>  pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
> -qemu_sglist_add(qsg, prp1, trans_len);
>  }
> +
> +status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
> +if (status) {
> +goto unmap;
> +}
> +
>  len -= trans_len;
>  if (len) {
>  if (unlikely(!prp2)) {
>  trace_pci_nvme_err_invalid_prp2_missing();
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
>  if (len > n->page_size) {
> @@ -242,6 +309,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> QEMUIOVector *iov, uint64_t prp1,
>  if (i == n->max_prp_ents - 1 && len > n->page_size) {
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
>  
> @@ -255,14 +323,14 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> QEMUIOVector *iov, uint64_t prp1,
>  
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> +   

Re: [PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-29 Thread Klaus Jensen
On Jul 29 16:57, Maxim Levitsky wrote:
> On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> > From: Klaus Jensen 
> > 
> > Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> > use them in nvme_map_prp.
> > 
> > This fixes a bug where in the case of a CMB transfer, the device would
> > map to the buffer with a wrong length.
> > 
> > Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in 
> > CMBs.")
> > Signed-off-by: Klaus Jensen 
> > ---
> >  hw/block/nvme.c   | 109 +++---
> >  hw/block/trace-events |   2 +
> >  2 files changed, 94 insertions(+), 17 deletions(-)
> > 
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index 4d7b730a62b6..9b1a080cdc70 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -109,6 +109,11 @@ static uint16_t nvme_sqid(NvmeRequest *req)
> >  return le16_to_cpu(req->sq->sqid);
> >  }
> >  
> > +static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
> > +{
> > +return &n->cmbuf[addr - n->ctrl_mem.addr];
> I would add an assert here just in case we do an out-of-bounds array access.

We never end up in nvme_addr_to_cmb without nvme_addr_is_cmb checking
the bounds first. But an assert can't hurt if someone decides to use it
in error.

I'll add it!
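
For reference, a minimal sketch of the suggested bounds assert
(illustrative only; the exact form is left to v2):

    static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
    {
        /* catch callers that bypass the nvme_addr_is_cmb() bounds check */
        assert(nvme_addr_is_cmb(n, addr));

        return &n->cmbuf[addr - n->ctrl_mem.addr];
    }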



Re: [PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-29 Thread Minwoo Im
Hi Klaus,

On 20-07-20 13:37:34, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> use them in nvme_map_prp.
> 
> This fixes a bug where in the case of a CMB transfer, the device would
> map to the buffer with a wrong length.
> 
> Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in 
> CMBs.")
> Signed-off-by: Klaus Jensen 

>  static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t 
> prp1,
>   uint64_t prp2, uint32_t len, NvmeCtrl *n)
>  {
>  hwaddr trans_len = n->page_size - (prp1 % n->page_size);
>  trans_len = MIN(len, trans_len);
>  int num_prps = (len >> n->page_bits) + 1;
> +uint16_t status;
>  
>  if (unlikely(!prp1)) {
>  trace_pci_nvme_err_invalid_prp();
>  return NVME_INVALID_FIELD | NVME_DNR;
> -} else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
> -   prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
> -qsg->nsg = 0;
> +}
> +
> +if (nvme_addr_is_cmb(n, prp1)) {
>  qemu_iovec_init(iov, num_prps);
> -qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], 
> trans_len);
>  } else {
>  pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
> -qemu_sglist_add(qsg, prp1, trans_len);
>  }
> +
> +status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
> +if (status) {
> +goto unmap;
> +}
> +
>  len -= trans_len;
>  if (len) {
>  if (unlikely(!prp2)) {
>  trace_pci_nvme_err_invalid_prp2_missing();
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
>  if (len > n->page_size) {
> @@ -242,6 +309,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> QEMUIOVector *iov, uint64_t prp1,
>  if (i == n->max_prp_ents - 1 && len > n->page_size) {
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
>  
> @@ -255,14 +323,14 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> QEMUIOVector *iov, uint64_t prp1,
>  
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
>  
>  trans_len = MIN(len, n->page_size);
> -if (qsg->nsg){
> -qemu_sglist_add(qsg, prp_ent, trans_len);
> -} else {
> -qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - 
> n->ctrl_mem.addr], trans_len);
> +status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
> +if (status) {
> +goto unmap;
>  }
>  len -= trans_len;
>  i++;
> @@ -270,20 +338,27 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> QEMUIOVector *iov, uint64_t prp1,
>  } else {
>  if (unlikely(prp2 & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prp2_align(prp2);
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
> -if (qsg->nsg) {
> -qemu_sglist_add(qsg, prp2, len);
> -} else {
> -qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - 
> n->ctrl_mem.addr], trans_len);
> +status = nvme_map_addr(n, qsg, iov, prp2, len);
> +if (status) {
> +goto unmap;

I like these parts; the code is much easier to read now.

Reviewed-by: Minwoo Im 

Thanks,



Re: [PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-29 Thread Maxim Levitsky
On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> use them in nvme_map_prp.
> 
> This fixes a bug where in the case of a CMB transfer, the device would
> map to the buffer with a wrong length.
> 
> Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in 
> CMBs.")
> Signed-off-by: Klaus Jensen 
> ---
>  hw/block/nvme.c   | 109 +++---
>  hw/block/trace-events |   2 +
>  2 files changed, 94 insertions(+), 17 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 4d7b730a62b6..9b1a080cdc70 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -109,6 +109,11 @@ static uint16_t nvme_sqid(NvmeRequest *req)
>  return le16_to_cpu(req->sq->sqid);
>  }
>  
> +static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
> +{
> > +return &n->cmbuf[addr - n->ctrl_mem.addr];
I would add an assert here just in case we do an out-of-bounds array access.
> +}
> +
>  static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
>  {
>  hwaddr low = n->ctrl_mem.addr;
> @@ -120,7 +125,7 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
>  static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
>  {
>  if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
> > -memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
> +memcpy(buf, nvme_addr_to_cmb(n, addr), size);
OK.
>  return;
>  }
>  
> @@ -203,29 +208,91 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue 
> *cq)
>  }
>  }
>  
> +static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr 
> addr,
> +  size_t len)
> +{
> +if (!len) {
> +return NVME_SUCCESS;
> +}
> +
> +trace_pci_nvme_map_addr_cmb(addr, len);
> +
> +if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
> +return NVME_DATA_TRAS_ERROR;
> +}
> +
> +qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
> +
> +return NVME_SUCCESS;
> +}
OK
> +
> +static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector 
> *iov,
> +  hwaddr addr, size_t len)
> +{
> +if (!len) {
> +return NVME_SUCCESS;
> +}
> +
> +trace_pci_nvme_map_addr(addr, len);
> +
> +if (nvme_addr_is_cmb(n, addr)) {
> +if (qsg && qsg->sg) {
> +return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> +}
> +
> +assert(iov);
> +
> +if (!iov->iov) {
> +qemu_iovec_init(iov, 1);
> +}
> +
> +return nvme_map_addr_cmb(n, iov, addr, len);
> +}
> +
> +if (iov && iov->iov) {
> +return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> +}
> +
> +assert(qsg);
> +
> +if (!qsg->sg) {
> > +pci_dma_sglist_init(qsg, &n->parent_obj, 1);
> +}
> +
> +qemu_sglist_add(qsg, addr, len);
> +
> +return NVME_SUCCESS;
> +}
OK
> +
>  static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t 
> prp1,
>   uint64_t prp2, uint32_t len, NvmeCtrl *n)
>  {
>  hwaddr trans_len = n->page_size - (prp1 % n->page_size);
>  trans_len = MIN(len, trans_len);
>  int num_prps = (len >> n->page_bits) + 1;
> +uint16_t status;
>  
>  if (unlikely(!prp1)) {
>  trace_pci_nvme_err_invalid_prp();
>  return NVME_INVALID_FIELD | NVME_DNR;
> -} else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
> -   prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
> -qsg->nsg = 0;
> +}
> +
> +if (nvme_addr_is_cmb(n, prp1)) {
>  qemu_iovec_init(iov, num_prps);
> > -qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], 
> trans_len);
>  } else {
> >  pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
> -qemu_sglist_add(qsg, prp1, trans_len);
>  }
> +
> +status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
> +if (status) {
> +goto unmap;
> +}
> +
>  len -= trans_len;
>  if (len) {
>  if (unlikely(!prp2)) {
>  trace_pci_nvme_err_invalid_prp2_missing();
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
>  if (len > n->page_size) {
> @@ -242,6 +309,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> QEMUIOVector *iov, uint64_t prp1,
>  if (i == n->max_prp_ents - 1 && len > n->page_size) {
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
>  trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> +status = NVME_INVALID_FIELD | NVME_DNR;
>  goto unmap;
>  }
>  
> @@ -255,14 +323,14 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
> QEMUIOVector *iov, uint64_t prp1,
>  
>  if (unlikely(!prp_ent || prp_ent & (n->page_size - 

[PATCH 02/16] hw/block/nvme: add mapping helpers

2020-07-20 Thread Klaus Jensen
From: Klaus Jensen 

Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
use them in nvme_map_prp.

This fixes a bug where in the case of a CMB transfer, the device would
map to the buffer with a wrong length.

Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in CMBs.")
Signed-off-by: Klaus Jensen 
---
 hw/block/nvme.c   | 109 +++---
 hw/block/trace-events |   2 +
 2 files changed, 94 insertions(+), 17 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 4d7b730a62b6..9b1a080cdc70 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -109,6 +109,11 @@ static uint16_t nvme_sqid(NvmeRequest *req)
 return le16_to_cpu(req->sq->sqid);
 }
 
+static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
+{
+return &n->cmbuf[addr - n->ctrl_mem.addr];
+}
+
 static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
 {
 hwaddr low = n->ctrl_mem.addr;
@@ -120,7 +125,7 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
 static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
 {
 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
-memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+memcpy(buf, nvme_addr_to_cmb(n, addr), size);
 return;
 }
 
@@ -203,29 +208,91 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
 }
 }
 
+static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
+  size_t len)
+{
+if (!len) {
+return NVME_SUCCESS;
+}
+
+trace_pci_nvme_map_addr_cmb(addr, len);
+
+if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
+return NVME_DATA_TRAS_ERROR;
+}
+
+qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
+
+return NVME_SUCCESS;
+}
+
+static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
+  hwaddr addr, size_t len)
+{
+if (!len) {
+return NVME_SUCCESS;
+}
+
+trace_pci_nvme_map_addr(addr, len);
+
+if (nvme_addr_is_cmb(n, addr)) {
+if (qsg && qsg->sg) {
+return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+}
+
+assert(iov);
+
+if (!iov->iov) {
+qemu_iovec_init(iov, 1);
+}
+
+return nvme_map_addr_cmb(n, iov, addr, len);
+}
+
+if (iov && iov->iov) {
+return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+}
+
+assert(qsg);
+
+if (!qsg->sg) {
+pci_dma_sglist_init(qsg, &n->parent_obj, 1);
+}
+
+qemu_sglist_add(qsg, addr, len);
+
+return NVME_SUCCESS;
+}
+
 static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
  uint64_t prp2, uint32_t len, NvmeCtrl *n)
 {
 hwaddr trans_len = n->page_size - (prp1 % n->page_size);
 trans_len = MIN(len, trans_len);
 int num_prps = (len >> n->page_bits) + 1;
+uint16_t status;
 
 if (unlikely(!prp1)) {
 trace_pci_nvme_err_invalid_prp();
 return NVME_INVALID_FIELD | NVME_DNR;
-} else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
-   prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
-qsg->nsg = 0;
+}
+
+if (nvme_addr_is_cmb(n, prp1)) {
 qemu_iovec_init(iov, num_prps);
-qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], 
trans_len);
 } else {
 pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
-qemu_sglist_add(qsg, prp1, trans_len);
 }
+
+status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
+if (status) {
+goto unmap;
+}
+
 len -= trans_len;
 if (len) {
 if (unlikely(!prp2)) {
 trace_pci_nvme_err_invalid_prp2_missing();
+status = NVME_INVALID_FIELD | NVME_DNR;
 goto unmap;
 }
 if (len > n->page_size) {
@@ -242,6 +309,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector 
*iov, uint64_t prp1,
 if (i == n->max_prp_ents - 1 && len > n->page_size) {
 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
 trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
+status = NVME_INVALID_FIELD | NVME_DNR;
 goto unmap;
 }
 
@@ -255,14 +323,14 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, 
QEMUIOVector *iov, uint64_t prp1,
 
 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
 trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
+status = NVME_INVALID_FIELD | NVME_DNR;
 goto unmap;
 }
 
 trans_len = MIN(len, n->page_size);
-if (qsg->nsg){
-qemu_sglist_add(qsg, prp_ent, trans_len);
-} else {
-qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - 
n->ctrl_mem.addr],