Add an option to allocate uncached memory for dma_alloc_coherent from
the global dma_coherent_default_memory.  This will allow moving
arm-nommu (and eventually other platforms) over to the generic code
for allocating uncached memory from a pre-populated pool.
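
For illustration, a platform selecting DMA_GLOBAL_POOL would hand a
chunk of memory to the pool once at boot, roughly like this (a minimal
sketch, assuming the dma_init_global_coherent() helper from this
series; the base address and size are made-up placeholders):

	/* in platform init code; foo_dma_pool_init is hypothetical */
	static int __init foo_dma_pool_init(void)
	{
		/* 0x90000000 / SZ_1M are example values only */
		return dma_init_global_coherent(0x90000000, SZ_1M);
	}
	core_initcall(foo_dma_pool_init);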

Note that for now this is a different pool from the one that platforms
capable of remapping at runtime use for GFP_ATOMIC allocations,
although there may be an opportunity to eventually converge the two
use cases on a common codebase.
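
With that in place, a dma_alloc_coherent() call for a non-coherent
device is served from the global pool via the new branch in
dma_direct_alloc() below.  Sketched from the consumer side (foo_dev
and the buffer size are placeholders):

	dma_addr_t dma;
	void *buf;

	/* dev_is_dma_coherent(foo_dev) is false, so this takes the
	 * dma_alloc_from_global_coherent() path added below */
	buf = dma_alloc_coherent(foo_dev, SZ_4K, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf / dma ... */
	dma_free_coherent(foo_dev, SZ_4K, buf, dma);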

Signed-off-by: Christoph Hellwig <h...@lst.de>
Tested-by: Dillon Min <dillon.min...@gmail.com>
---
 kernel/dma/Kconfig  |  4 ++++
 kernel/dma/direct.c | 15 +++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 77b405508743..725cfd51762b 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -93,6 +93,10 @@ config DMA_COHERENT_POOL
        select GENERIC_ALLOCATOR
        bool
 
+config DMA_GLOBAL_POOL
+       select DMA_DECLARE_COHERENT
+       bool
+
 config DMA_REMAP
        bool
        depends on MMU
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f737e3347059..d1d0258ed6d0 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -156,9 +156,14 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 
        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
+       if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+           !dev_is_dma_coherent(dev))
+               return dma_alloc_from_global_coherent(dev, size, dma_handle);
+
        /*
         * Remapping or decrypting memory may block. If either is required and
         * we can't block, allocate the memory from the atomic pools.
@@ -255,11 +260,19 @@ void dma_direct_free(struct device *dev, size_t size,
 
        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }
 
+       if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+           !dev_is_dma_coherent(dev)) {
+               if (!dma_release_from_global_coherent(page_order, cpu_addr))
+                       WARN_ON_ONCE(1);
+               return;
+       }
+
        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
@@ -462,6 +475,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
+       if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+               return ret;
 
        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;
-- 
2.30.2
