Add system and system-contig pools to the dma-buf pools framework.

These pools allow applications to allocate page-allocator-backed
dma-bufs of either non-contiguous (system) or physically contiguous
(system-contig) memory.
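
For example, a userspace allocation might look like the sketch below.
The device path, ioctl name, uapi header and struct layout shown here
are assumptions based on the pools core earlier in this series; this
patch itself defines none of them.

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/dmabuf-pools.h>	/* hypothetical uapi header */

  int alloc_system_dmabuf(size_t len)
  {
  	struct dmabuf_pool_allocation_data data = {
  		.len = len,
  		.fd_flags = O_RDWR | O_CLOEXEC,
  	};
  	int pool = open("/dev/dmabuf_pools/system_pool", O_RDWR);

  	if (pool < 0 || ioctl(pool, DMABUF_POOL_IOC_ALLOC, &data) < 0)
  		return -1;
  	close(pool);
  	return data.fd;	/* dma-buf fd: mmap it or pass to drivers */
  }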

Cc: Laura Abbott <labb...@redhat.com>
Cc: Benjamin Gaignard <benjamin.gaign...@linaro.org>
Cc: Sumit Semwal <sumit.sem...@linaro.org>
Cc: Liam Mark <lm...@codeaurora.org>
Cc: Brian Starkey <brian.star...@arm.com>
Cc: Andrew F. Davis <a...@ti.com>
Cc: Chenbo Feng <fe...@google.com>
Cc: Alistair Strachan <astrac...@google.com>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: John Stultz <john.stu...@linaro.org>
---
 drivers/dma-buf/pools/Kconfig       |   7 +
 drivers/dma-buf/pools/Makefile      |   1 +
 drivers/dma-buf/pools/system_pool.c | 374 ++++++++++++++++++++++++++++++++++++
 3 files changed, 382 insertions(+)
 create mode 100644 drivers/dma-buf/pools/system_pool.c

diff --git a/drivers/dma-buf/pools/Kconfig b/drivers/dma-buf/pools/Kconfig
index caa7eb8..787b2a6 100644
--- a/drivers/dma-buf/pools/Kconfig
+++ b/drivers/dma-buf/pools/Kconfig
@@ -8,3 +8,10 @@ menuconfig DMABUF_POOLS
          which allow userspace to allocate dma-bufs that can be shared between
          drivers.
          If you're not using Android, it's probably safe to say N here.
+
+config DMABUF_POOLS_SYSTEM
+       bool "DMA-BUF System Pool"
+       depends on DMABUF_POOLS
+       help
+         Choose this option to enable the system dmabuf pools. The system
+         pool is backed by pages from the buddy allocator, and the
+         system-contig pool provides physically contiguous allocations
+         from the same source. If in doubt, say Y.
diff --git a/drivers/dma-buf/pools/Makefile b/drivers/dma-buf/pools/Makefile
index a51ec25..2ccf2a1 100644
--- a/drivers/dma-buf/pools/Makefile
+++ b/drivers/dma-buf/pools/Makefile
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DMABUF_POOLS)             += dmabuf-pools.o pool-ioctl.o pool-helpers.o page_pool.o
+obj-$(CONFIG_DMABUF_POOLS_SYSTEM)      += system_pool.o
diff --git a/drivers/dma-buf/pools/system_pool.c b/drivers/dma-buf/pools/system_pool.c
new file mode 100644
index 0000000..1756990
--- /dev/null
+++ b/drivers/dma-buf/pools/system_pool.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/dma-buf/pools/system_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "dmabuf-pools.h"
+
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
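+/*
+ * High-order allocations are opportunistic: don't reclaim, don't retry
+ * and don't warn on failure, since we can always fall back to smaller
+ * orders. Lower orders are allowed to trigger reclaim.
+ */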
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+                                    __GFP_NORETRY) & ~__GFP_RECLAIM;
+static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
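+/*
+ * Orders to try, largest first: with 4K pages this is 1MB, 64K and 4K
+ * chunks. Larger chunks mean shorter sg-tables when the allocator can
+ * still provide them.
+ */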
+static const unsigned int orders[] = {8, 4, 0};
+
+static int order_to_index(unsigned int order)
+{
+       int i;
+
+       for (i = 0; i < NUM_ORDERS; i++)
+               if (order == orders[i])
+                       return i;
+       WARN_ON(1);
+       return -1;
+}
+
+static inline unsigned int order_to_size(int order)
+{
+       return PAGE_SIZE << order;
+}
+
+struct system_pool {
+       struct dmabuf_pool pool;
+       struct dmabuf_page_pool *page_pools[NUM_ORDERS];
+};
+
+static struct page *alloc_buffer_page(struct system_pool *sys_pool,
+                                     struct dmabuf_pool_buffer *buffer,
+                                     unsigned long order)
+{
+       struct dmabuf_page_pool *pagepool =
+                               sys_pool->page_pools[order_to_index(order)];
+
+       return dmabuf_page_pool_alloc(pagepool);
+}
+
+static void free_buffer_page(struct system_pool *sys_pool,
+                            struct dmabuf_pool_buffer *buffer,
+                            struct page *page)
+{
+       struct dmabuf_page_pool *pagepool;
+       unsigned int order = compound_order(page);
+
+       /* shrinker-initiated free: return the page straight to the system */
+       if (buffer->private_flags & DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE) {
+               __free_pages(page, order);
+               return;
+       }
+
+       pagepool = sys_pool->page_pools[order_to_index(order)];
+
+       dmabuf_page_pool_free(pagepool, page);
+}
+
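+/*
+ * Walk orders[] from largest to smallest and return the first chunk
+ * that both fits in the remaining size and does not exceed max_order.
+ */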
+static struct page *alloc_largest_available(struct system_pool *sys_pool,
+                                           struct dmabuf_pool_buffer *buffer,
+                                           unsigned long size,
+                                           unsigned int max_order)
+{
+       struct page *page;
+       int i;
+
+       for (i = 0; i < NUM_ORDERS; i++) {
+               if (size < order_to_size(orders[i]))
+                       continue;
+               if (max_order < orders[i])
+                       continue;
+
+               page = alloc_buffer_page(sys_pool, buffer, orders[i]);
+               if (!page)
+                       continue;
+
+               return page;
+       }
+
+       return NULL;
+}
+
+static int system_pool_allocate(struct dmabuf_pool *pool,
+                                   struct dmabuf_pool_buffer *buffer,
+                                   unsigned long size,
+                                   unsigned long flags)
+{
+       struct system_pool *sys_pool = container_of(pool,
+                                                       struct system_pool,
+                                                       pool);
+       struct sg_table *table;
+       struct scatterlist *sg;
+       struct list_head pages;
+       struct page *page, *tmp_page;
+       int i = 0;
+       unsigned long size_remaining = PAGE_ALIGN(size);
+       unsigned int max_order = orders[0];
+
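+       /* reject requests that could never succeed (over half of RAM) */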
+       if (size / PAGE_SIZE > totalram_pages() / 2)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&pages);
+       while (size_remaining > 0) {
+               page = alloc_largest_available(sys_pool, buffer, size_remaining,
+                                              max_order);
+               if (!page)
+                       goto free_pages;
+               list_add_tail(&page->lru, &pages);
+               size_remaining -= PAGE_SIZE << compound_order(page);
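+               /* don't retry orders larger than what just succeeded */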
+               max_order = compound_order(page);
+               i++;
+       }
+       table = kmalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               goto free_pages;
+
+       if (sg_alloc_table(table, i, GFP_KERNEL))
+               goto free_table;
+
+       sg = table->sgl;
+       list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+               sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+               sg = sg_next(sg);
+               list_del(&page->lru);
+       }
+
+       buffer->sg_table = table;
+       return 0;
+
+free_table:
+       kfree(table);
+free_pages:
+       list_for_each_entry_safe(page, tmp_page, &pages, lru)
+               free_buffer_page(sys_pool, buffer, page);
+       return -ENOMEM;
+}
+
+static void system_pool_free(struct dmabuf_pool_buffer *buffer)
+{
+       struct system_pool *sys_pool = container_of(buffer->pool,
+                                                       struct system_pool,
+                                                       pool);
+       struct sg_table *table = buffer->sg_table;
+       struct scatterlist *sg;
+       int i;
+
+       /* zero the buffer before returning its pages to the page pool */
+       if (!(buffer->private_flags & DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE))
+               dmabuf_pool_buffer_zero(buffer);
+
+       for_each_sg(table->sgl, sg, table->nents, i)
+               free_buffer_page(sys_pool, buffer, sg_page(sg));
+       sg_free_table(table);
+       kfree(table);
+}
+
+static int system_pool_shrink(struct dmabuf_pool *pool, gfp_t gfp_mask,
+                                 int nr_to_scan)
+{
+       struct dmabuf_page_pool *page_pool;
+       struct system_pool *sys_pool;
+       int nr_total = 0;
+       int i, nr_freed;
+       int only_scan = 0;
+
+       sys_pool = container_of(pool, struct system_pool, pool);
+
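+       /* nr_to_scan == 0 means just report how many pages could be freed */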
+       if (!nr_to_scan)
+               only_scan = 1;
+
+       for (i = 0; i < NUM_ORDERS; i++) {
+               page_pool = sys_pool->page_pools[i];
+
+               if (only_scan) {
+                       nr_total += dmabuf_page_pool_shrink(page_pool,
+                                                        gfp_mask,
+                                                        nr_to_scan);
+
+               } else {
+                       nr_freed = dmabuf_page_pool_shrink(page_pool,
+                                                       gfp_mask,
+                                                       nr_to_scan);
+                       nr_to_scan -= nr_freed;
+                       nr_total += nr_freed;
+                       if (nr_to_scan <= 0)
+                               break;
+               }
+       }
+       return nr_total;
+}
+
+static struct dmabuf_pool_ops system_pool_ops = {
+       .allocate = system_pool_allocate,
+       .free = system_pool_free,
+       .map_kernel = dmabuf_pool_map_kernel,
+       .unmap_kernel = dmabuf_pool_unmap_kernel,
+       .map_user = dmabuf_pool_map_user,
+       .shrink = system_pool_shrink,
+};
+
+static void system_pool_destroy_pools(struct dmabuf_page_pool **page_pools)
+{
+       int i;
+
+       for (i = 0; i < NUM_ORDERS; i++)
+               if (page_pools[i])
+                       dmabuf_page_pool_destroy(page_pools[i]);
+}
+
+static int system_pool_create_pools(struct dmabuf_page_pool **page_pools)
+{
+       int i;
+       gfp_t gfp_flags = low_order_gfp_flags;
+
+       for (i = 0; i < NUM_ORDERS; i++) {
+               struct dmabuf_page_pool *pool;
+
+               if (orders[i] > 4)
+                       gfp_flags = high_order_gfp_flags;
+
+               pool = dmabuf_page_pool_create(gfp_flags, orders[i]);
+               if (!pool)
+                       goto err_create_pool;
+               page_pools[i] = pool;
+       }
+       return 0;
+
+err_create_pool:
+       system_pool_destroy_pools(page_pools);
+       return -ENOMEM;
+}
+
+static struct dmabuf_pool *__system_pool_create(void)
+{
+       struct system_pool *sys_pool;
+
+       sys_pool = kzalloc(sizeof(*sys_pool), GFP_KERNEL);
+       if (!sys_pool)
+               return ERR_PTR(-ENOMEM);
+       sys_pool->pool.ops = &system_pool_ops;
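+       /* free buffers asynchronously via the core's deferred-free path */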
+       sys_pool->pool.flags = DMABUF_POOL_FLAG_DEFER_FREE;
+
+       if (system_pool_create_pools(sys_pool->page_pools))
+               goto free_pool;
+
+       return &sys_pool->pool;
+
+free_pool:
+       kfree(sys_pool);
+       return ERR_PTR(-ENOMEM);
+}
+
+static int system_pool_create(void)
+{
+       struct dmabuf_pool *pool;
+
+       pool = __system_pool_create();
+       if (IS_ERR(pool))
+               return PTR_ERR(pool);
+       pool->name = "system_pool";
+
+       dmabuf_pool_add(pool);
+       return 0;
+}
+device_initcall(system_pool_create);
+
+static int system_contig_pool_allocate(struct dmabuf_pool *pool,
+                                          struct dmabuf_pool_buffer *buffer,
+                                          unsigned long len,
+                                          unsigned long flags)
+{
+       int order = get_order(len);
+       struct page *page;
+       struct sg_table *table;
+       unsigned long i;
+       int ret;
+
+       page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
+       if (!page)
+               return -ENOMEM;
+
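+       /*
+        * Split the high-order block into order-0 pages so the tail
+        * beyond the page-aligned length can be handed back, and so
+        * the pages can later be freed individually.
+        */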
+       split_page(page, order);
+
+       len = PAGE_ALIGN(len);
+       for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+               __free_page(page + i);
+
+       table = kmalloc(sizeof(*table), GFP_KERNEL);
+       if (!table) {
+               ret = -ENOMEM;
+               goto free_pages;
+       }
+
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret)
+               goto free_table;
+
+       sg_set_page(table->sgl, page, len, 0);
+
+       buffer->sg_table = table;
+
+       return 0;
+
+free_table:
+       kfree(table);
+free_pages:
+       for (i = 0; i < len >> PAGE_SHIFT; i++)
+               __free_page(page + i);
+
+       return ret;
+}
+
+static void system_contig_pool_free(struct dmabuf_pool_buffer *buffer)
+{
+       struct sg_table *table = buffer->sg_table;
+       struct page *page = sg_page(table->sgl);
+       unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+       unsigned long i;
+
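+       /* the allocation was split to order-0 pages; free them one by one */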
+       for (i = 0; i < pages; i++)
+               __free_page(page + i);
+       sg_free_table(table);
+       kfree(table);
+}
+
+static struct dmabuf_pool_ops kmalloc_ops = {
+       .allocate = system_contig_pool_allocate,
+       .free = system_contig_pool_free,
+       .map_kernel = dmabuf_pool_map_kernel,
+       .unmap_kernel = dmabuf_pool_unmap_kernel,
+       .map_user = dmabuf_pool_map_user,
+};
+
+static struct dmabuf_pool *__system_contig_pool_create(void)
+{
+       struct dmabuf_pool *pool;
+
+       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               return ERR_PTR(-ENOMEM);
+       pool->ops = &kmalloc_ops;
+       pool->name = "system_contig_pool";
+       return pool;
+}
+
+static int system_contig_pool_create(void)
+{
+       struct dmabuf_pool *pool;
+
+       pool = __system_contig_pool_create();
+       if (IS_ERR(pool))
+               return PTR_ERR(pool);
+
+       dmabuf_pool_add(pool);
+       return 0;
+}
+device_initcall(system_contig_pool_create);
+
-- 
2.7.4
