1. In swiotlb_alloc_coherent(), the __get_free_pages() branch returns
   vaddr directly on success, and passes vaddr to free_pages() on failure.
2. Therefore, we can pass vaddr from __dma_free() straight through to
   swiotlb_free_coherent(), consistent with swiotlb_alloc_coherent().

This patch has no functional change, but yields a small performance
improvement.

Signed-off-by: Zhen Lei <thunder.leiz...@huawei.com>
---
 arch/arm64/mm/dma-mapping.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a6e757c..b2f2834 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -187,8 +187,6 @@ static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       struct dma_attrs *attrs)
 {
-       void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
-
        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
@@ -196,7 +194,7 @@ static void __dma_free(struct device *dev, size_t size,
                        return;
                vunmap(vaddr);
        }
-       __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+       __dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
 }

 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
--
2.5.0


Reply via email to