On Fri, Sep 24, 2021 at 06:01:53PM +0800, John Garry wrote:
> It really is a property of the IOVA rcache code that we need to alloc a
> power-of-2 size, so relocate the functionality to resize into
> alloc_iova_fast(), rather than the callsites.
> 
> Signed-off-by: John Garry <john.ga...@huawei.com>

for vdpa code:

Acked-by: Michael S. Tsirkin <m...@redhat.com>

> ---
>  drivers/iommu/dma-iommu.c            | 8 --------
>  drivers/iommu/iova.c                 | 9 +++++++++
>  drivers/vdpa/vdpa_user/iova_domain.c | 8 --------
>  3 files changed, 9 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 896bea04c347..a99b3445fef8 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -444,14 +444,6 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
>  
>       shift = iova_shift(iovad);
>       iova_len = size >> shift;
> -     /*
> -      * Freeing non-power-of-two-sized allocations back into the IOVA caches
> -      * will come back to bite us badly, so we have to waste a bit of space
> -      * rounding up anything cacheable to make sure that can't happen. The
> -      * order of the unadjusted size will still match upon freeing.
> -      */
> -     if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
> -             iova_len = roundup_pow_of_two(iova_len);
>  
>       dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
>  
> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
> index 9e8bc802ac05..ff567cbc42f7 100644
> --- a/drivers/iommu/iova.c
> +++ b/drivers/iommu/iova.c
> @@ -497,6 +497,15 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
>       unsigned long iova_pfn;
>       struct iova *new_iova;
>  
> +     /*
> +      * Freeing non-power-of-two-sized allocations back into the IOVA caches
> +      * will come back to bite us badly, so we have to waste a bit of space
> +      * rounding up anything cacheable to make sure that can't happen. The
> +      * order of the unadjusted size will still match upon freeing.
> +      */
> +     if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
> +             size = roundup_pow_of_two(size);
> +
>       iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
>       if (iova_pfn)
>               return iova_pfn;
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> index 1daae2608860..2b1143f11d8f 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.c
> +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> @@ -292,14 +292,6 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
>       unsigned long iova_len = iova_align(iovad, size) >> shift;
>       unsigned long iova_pfn;
>  
> -     /*
> -      * Freeing non-power-of-two-sized allocations back into the IOVA caches
> -      * will come back to bite us badly, so we have to waste a bit of space
> -      * rounding up anything cacheable to make sure that can't happen. The
> -      * order of the unadjusted size will still match upon freeing.
> -      */
> -     if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
> -             iova_len = roundup_pow_of_two(iova_len);
>       iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
>  
>       return iova_pfn << shift;
> -- 
> 2.26.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to