On Tue, 2020-02-04 at 10:52 +0100, Klaus Jensen wrote:
> Since clean up of the request qsg/iov has been moved to the common
> nvme_enqueue_req_completion function, there is no need to use a
> stack allocated qsg/iov in nvme_dma_prp.
>
> Signed-off-by: Klaus Jensen
> ---
> hw/block/nvme.c | 18 ++++++------------
> 1 file changed, 6 insertions(+), 12 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index e97da35c4ca1..f8c81b9e2202 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -298,23 +298,21 @@ unmap:
> static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> uint64_t prp1, uint64_t prp2, DMADirection dir, NvmeRequest *req)
> {
> -QEMUSGList qsg;
> -QEMUIOVector iov;
> uint16_t status = NVME_SUCCESS;
> size_t bytes;
>
> -status = nvme_map_prp(n, &qsg, &iov, prp1, prp2, len, req);
> +status = nvme_map_prp(n, &req->qsg, &req->iov, prp1, prp2, len, req);
> if (status) {
> return status;
> }
>
> -if (qsg.nsg > 0) {
> +if (req->qsg.nsg > 0) {
> uint64_t residual;
>
> if (dir == DMA_DIRECTION_TO_DEVICE) {
> -residual = dma_buf_write(ptr, len, &qsg);
> +residual = dma_buf_write(ptr, len, &req->qsg);
> } else {
> -residual = dma_buf_read(ptr, len, &qsg);
> +residual = dma_buf_read(ptr, len, &req->qsg);
> }
>
> if (unlikely(residual)) {
> @@ -322,15 +320,13 @@ static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr,
> uint32_t len,
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
>
> -qemu_sglist_destroy(&qsg);
> -
> return status;
> }
>
> if (dir == DMA_DIRECTION_TO_DEVICE) {
> -bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
> +bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
> } else {
> -bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
> +bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
> }
>
> if (unlikely(bytes != len)) {
> @@ -338,8 +334,6 @@ static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr,
> uint32_t len,
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
>
> -qemu_iovec_destroy(&iov);
> -
> return status;
> }
>
Reviewed-by: Maxim Levitsky
Best regards,
Maxim Levitsky