The CMA heap instantiation was initially developed by having the
contiguous DMA code call into the CMA heap to create a new instance
every time a reserved memory area is probed.

Turning the CMA heap into a module would then create a dependency of
built-in kernel code on a loadable module, which doesn't work.

Let's turn the logic around and do the opposite: store all the reserved
memory CMA regions into the contiguous DMA code, and provide an iterator
for the heap to use when it probes.

Signed-off-by: Maxime Ripard <[email protected]>
---
 drivers/dma-buf/heaps/cma_heap.c | 18 ++----------------
 include/linux/dma-map-ops.h      |  5 +++++
 kernel/dma/contiguous.c          | 26 ++++++++++++++++++++++++--
 3 files changed, 31 insertions(+), 18 deletions(-)

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index bd3370b9a3f6d4e18885a1d0e8ba3f659b85ef47..f8a3d87f3ccee9630383ba28502eb40b10671cc2 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -28,23 +28,10 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #define DEFAULT_CMA_NAME "default_cma_region"
 
-static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
-static unsigned int dma_areas_num __initdata;
-
-int __init dma_heap_cma_register_heap(struct cma *cma)
-{
-       if (dma_areas_num >= ARRAY_SIZE(dma_areas))
-               return -EINVAL;
-
-       dma_areas[dma_areas_num++] = cma;
-
-       return 0;
-}
-
 struct cma_heap {
        struct dma_heap *heap;
        struct cma *cma;
 };
 
@@ -412,22 +399,21 @@ static int __init __add_cma_heap(struct cma *cma, const char *name)
 }
 
 static int __init add_cma_heaps(void)
 {
        struct cma *default_cma = dev_get_cma_area(NULL);
+       struct cma *cma;
        unsigned int i;
        int ret;
 
        if (default_cma) {
                ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
                if (ret)
                        return ret;
        }
 
-       for (i = 0; i < dma_areas_num; i++) {
-               struct cma *cma = dma_areas[i];
-
+       for (i = 0; (cma = dma_contiguous_get_reserved_region(i)) != NULL; i++) {
                ret = __add_cma_heap(cma, cma_get_name(cma));
                if (ret) {
                        pr_warn("Failed to add CMA heap %s", cma_get_name(cma));
                        continue;
                }
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 60b63756df821d839436618f1fca2bfa3eabe075..3007c68a8ec5b85990d1938d04a2f05c1a71acdb 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -110,10 +110,11 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count);
 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
 void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
 
 void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+struct cma *dma_contiguous_get_reserved_region(unsigned int idx);
 #else /* CONFIG_DMA_CMA */
 static inline struct cma *dev_get_cma_area(struct device *dev)
 {
        return NULL;
 }
@@ -148,10 +149,14 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
        __free_pages(page, get_order(size));
 }
 static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
 {
 }
+static inline struct cma *dma_contiguous_get_reserved_region(unsigned int idx)
+{
+       return NULL;
+}
 #endif /* CONFIG_DMA_CMA*/
 
 #ifdef CONFIG_DMA_DECLARE_COHERENT
 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index c56004d314dc2e436cddf3b20a4ee6ce8178bee4..14bd54fb758537f01a6fe27318e7b683964e20b1 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -456,10 +456,32 @@ void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
 #include <linux/of_reserved_mem.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
+static struct cma *rmem_cma_areas[MAX_CMA_AREAS];
+static unsigned int rmem_cma_areas_num;
+
+static int rmem_cma_insert_area(struct cma *cma)
+{
+       if (rmem_cma_areas_num >= ARRAY_SIZE(rmem_cma_areas))
+               return -EINVAL;
+
+       rmem_cma_areas[rmem_cma_areas_num++] = cma;
+
+       return 0;
+}
+
+struct cma *dma_contiguous_get_reserved_region(unsigned int idx)
+{
+       if (idx >= rmem_cma_areas_num)
+               return NULL;
+
+       return rmem_cma_areas[idx];
+}
+EXPORT_SYMBOL_GPL(dma_contiguous_get_reserved_region);
+
 static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
        dev->cma_area = rmem->priv;
        return 0;
 }
@@ -504,13 +526,13 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
        rmem->priv = cma;
 
        pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
 
-       err = dma_heap_cma_register_heap(cma);
+       err = rmem_cma_insert_area(cma);
        if (err)
-               pr_warn("Couldn't register CMA heap.");
+               pr_warn("Couldn't store CMA reserved area\n");
 
        return 0;
 }
 RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
 #endif

-- 
2.53.0

Reply via email to