From: Robin Murphy <robin.mur...@arm.com>

Always remapping CMA allocations was largely a bodge to keep the freeing
logic manageable when it was split between here and an arch wrapper. Now
that it's all together and streamlined, we can relax that limitation.

Signed-off-by: Robin Murphy <robin.mur...@arm.com>
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 drivers/iommu/dma-iommu.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
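
For reference, a minimal standalone sketch of the allocation-path decision
the second hunk below introduces. This is an illustration, not the patched
function itself; the helper signatures are assumed to be those used in the
diff, and the function name is made up for the sketch:

/* Sketch: pick a kernel address for a contiguous (CMA) DMA allocation. */
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *cma_kernel_addr_sketch(struct device *dev, struct page *page,
				    size_t size, size_t iosize,
				    unsigned long attrs, bool coherent)
{
	void *addr;

	if (!coherent || PageHighMem(page)) {
		/*
		 * A remap is still required: either the CPU view needs
		 * non-cacheable attributes, or a highmem page has no
		 * linear-map address to hand back at all.
		 */
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
				prot, __builtin_return_address(0));
		if (addr && !coherent)
			arch_dma_prep_coherent(page, iosize);
	} else {
		/* Coherent lowmem: the linear-map address works as-is. */
		addr = page_address(page);
	}
	return addr;
}

Since only the !coherent/highmem case now goes through vmap space, the
freeing side can presumably keep telling the two cases apart with
is_vmalloc_addr() and only call dma_common_free_remap() for remapped
buffers.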

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 41e87756c076..3629bc2f59ee 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -973,7 +973,6 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-       pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
        size_t iosize = size;
        struct page *page;
        void *addr;
@@ -1021,13 +1020,19 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
        if (*handle == DMA_MAPPING_ERROR)
                goto out_free_pages;
 
-       addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
-                       __builtin_return_address(0));
-       if (!addr)
-               goto out_unmap;
+       if (!coherent || PageHighMem(page)) {
+               pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 
-       if (!coherent)
-               arch_dma_prep_coherent(page, iosize);
+               addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+                               __builtin_return_address(0));
+               if (!addr)
+                       goto out_unmap;
+
+               if (!coherent)
+                       arch_dma_prep_coherent(page, iosize);
+       } else {
+               addr = page_address(page);
+       }
        memset(addr, 0, size);
        return addr;
 out_unmap:
-- 
2.20.1
