On Thu, 11 Apr 2019, Christoph Hellwig wrote:
> Refactor the code a bit to make further changes easier.
> 
> Signed-off-by: Christoph Hellwig <h...@lst.de>

Reviewed-by: Stefano Stabellini <sstabell...@kernel.org>

> ---
>  drivers/xen/swiotlb-xen.c | 31 ++++++++++++++++---------------
>  1 file changed, 16 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index 9a951504dc12..5dcb06fe9667 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -391,13 +391,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
>       if (dma_capable(dev, dev_addr, size) &&
>           !range_straddles_page_boundary(phys, size) &&
>               !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
> -             (swiotlb_force != SWIOTLB_FORCE)) {
> -             /* we are not interested in the dma_addr returned by
> -      * xen_dma_map_page, only in the potential cache flushes executed
> -              * by the function. */
> -             xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
> -             return dev_addr;
> -     }
> +             swiotlb_force != SWIOTLB_FORCE)
> +             goto done;
>  
>       /*
>        * Oh well, have to allocate and map a bounce buffer.
> @@ -410,19 +405,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
>               return DMA_MAPPING_ERROR;
>  
>       dev_addr = xen_phys_to_bus(map);
> -     xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
> -                                     dev_addr, map & ~PAGE_MASK, size, dir, attrs);
>  
>       /*
>        * Ensure that the address returned is DMA'ble
>        */
> -     if (dma_capable(dev, dev_addr, size))
> -             return dev_addr;
> -
> -     attrs |= DMA_ATTR_SKIP_CPU_SYNC;
> -     swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
> +     if (unlikely(!dma_capable(dev, dev_addr, size))) {
> +             swiotlb_tbl_unmap_single(dev, map, size, dir,
> +                             attrs | DMA_ATTR_SKIP_CPU_SYNC);
> +             return DMA_MAPPING_ERROR;
> +     }
>  
> -     return DMA_MAPPING_ERROR;
> +     page = pfn_to_page(map >> PAGE_SHIFT);
> +     offset = map & ~PAGE_MASK;
> +done:
> +     /*
> +      * we are not interested in the dma_addr returned by xen_dma_map_page,
> +      * only in the potential cache flushes executed by the function.
> +      */
> +     xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
> +     return dev_addr;
>  }
>  
>  /*
> -- 
> 2.20.1
> 
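For anyone reading along, here is roughly what the mapping path looks like with
the two hunks above applied. This is a condensed sketch, not the exact upstream
code: the prototype, the initial phys/dev_addr computation and the
swiotlb_tbl_map_single() call site are context the diff does not show, so those
lines (marked below) are reconstructed assumptions. The point is simply that
the fast path and the bounce-buffer path now funnel into a single
xen_dma_map_page() call at the done: label instead of duplicating it.

static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	/* Assumed context, not part of the diff: translate page + offset
	 * into a physical and then a bus address. */
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	/* Fast path: already DMA'ble, skip straight to the cache flushes. */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 * (Assumed call site, unchanged by this patch and not shown in the
	 * hunks above.)
	 */
	dma_addr_t start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	dev_addr = xen_phys_to_bus(map);

	/* Ensure that the address returned is DMA'ble. */
	if (unlikely(!dma_capable(dev, dev_addr, size))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

	/* Point page/offset at the bounce buffer so the arch hook below
	 * operates on the right memory. */
	page = pfn_to_page(map >> PAGE_SHIFT);
	offset = map & ~PAGE_MASK;
done:
	/*
	 * we are not interested in the dma_addr returned by xen_dma_map_page,
	 * only in the potential cache flushes executed by the function.
	 */
	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
	return dev_addr;
}

The nice property of the goto form is that whatever later needs to happen just
before the final xen_dma_map_page() only has to be written once for both paths,
which is presumably the "further changes" the commit message has in mind.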