[PATCH v4 2/7] dma-buf: heaps: Move heap-helper logic into the cma_heap implementation

2020-10-28 Thread John Stultz
Since the heap-helpers logic ended up not being as generic as
hoped, move the heap-helpers dma_buf_ops implementations into
the cma_heap directly.

This will allow us to remove the heap_helpers code in a following
patch.
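
For reference, after this change cma_heap.c carries its own dma_buf_ops table wired to
the per-heap callbacks added below, instead of routing through the heap-helpers. A
minimal sketch of that table follows; the ops past begin_cpu_access are cut off in the
quoted diff, so those handler names are assumed to follow the same cma_heap_* naming
(the cma_heap_buf_ops name itself is confirmed by the v2 changelog below):

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	/* the handlers below are assumed; they are not visible in the quoted diff */
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

The per-buffer state that used to live in struct heap_helper_buffer moves into the new
struct cma_heap_buffer, so each op reaches its pages, attachment list and vmap count
through dmabuf->priv.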

Cc: Sumit Semwal 
Cc: Liam Mark 
Cc: Laura Abbott 
Cc: Brian Starkey 
Cc: Hridya Valsaraju 
Cc: Suren Baghdasaryan 
Cc: Sandeep Patil 
Cc: Daniel Mentz 
Cc: Chris Goldsworthy 
Cc: Ørjan Eide 
Cc: Robin Murphy 
Cc: Ezequiel Garcia 
Cc: Simon Ser 
Cc: James Jones 
Cc: linux-me...@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Reviewed-by: Brian Starkey 
Signed-off-by: John Stultz 
---
v2:
* Fix unused return value and locking issue Reported-by:
kernel test robot 
Julia Lawall 
* Make cma_heap_buf_ops static suggested by
kernel test robot 
* Fix uninitialized return in cma Reported-by:
kernel test robot 
* Minor cleanups
v3:
* Use the new sgtable mapping functions, as Suggested-by:
 Daniel Mentz 
v4:
* Spelling fix suggested by BrianS
---
 drivers/dma-buf/heaps/cma_heap.c | 314 ++-
 1 file changed, 265 insertions(+), 49 deletions(-)

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index e55384dc115b..5341e5e226d5 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -2,76 +2,290 @@
 /*
  * DMABUF CMA heap exporter
  *
- * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
  * Author:  for ST-Ericsson.
+ *
+ * Also utilizing parts of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis 
  */
-
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
+#include 
+#include 
 #include 
-#include 
 #include 
-#include 
+#include 
 
-#include "heap-helpers.h"
 
 struct cma_heap {
struct dma_heap *heap;
struct cma *cma;
 };
 
-static void cma_heap_free(struct heap_helper_buffer *buffer)
+struct cma_heap_buffer {
+   struct cma_heap *heap;
+   struct list_head attachments;
+   struct mutex lock;
+   unsigned long len;
+   struct page *cma_pages;
+   struct page **pages;
+   pgoff_t pagecount;
+   int vmap_cnt;
+   void *vaddr;
+};
+
+struct dma_heap_attachment {
+   struct device *dev;
+   struct sg_table table;
+   struct list_head list;
+};
+
+static int cma_heap_attach(struct dma_buf *dmabuf,
+  struct dma_buf_attachment *attachment)
 {
-   struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
-   unsigned long nr_pages = buffer->pagecount;
-   struct page *cma_pages = buffer->priv_virt;
+   struct cma_heap_buffer *buffer = dmabuf->priv;
+   struct dma_heap_attachment *a;
+   int ret;
 
-   /* free page list */
-   kfree(buffer->pages);
-   /* release memory */
-   cma_release(cma_heap->cma, cma_pages, nr_pages);
+   a = kzalloc(sizeof(*a), GFP_KERNEL);
+   if (!a)
+   return -ENOMEM;
+
+   ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+   buffer->pagecount, 0,
+   buffer->pagecount << PAGE_SHIFT,
+   GFP_KERNEL);
+   if (ret) {
+   kfree(a);
+   return ret;
+   }
+
+   a->dev = attachment->dev;
+   INIT_LIST_HEAD(&a->list);
+
+   attachment->priv = a;
+
+   mutex_lock(&buffer->lock);
+   list_add(&a->list, &buffer->attachments);
+   mutex_unlock(&buffer->lock);
+
+   return 0;
+}
+
+static void cma_heap_detach(struct dma_buf *dmabuf,
+   struct dma_buf_attachment *attachment)
+{
+   struct cma_heap_buffer *buffer = dmabuf->priv;
+   struct dma_heap_attachment *a = attachment->priv;
+
+   mutex_lock(&buffer->lock);
+   list_del(&a->list);
+   mutex_unlock(&buffer->lock);
+
+   sg_free_table(&a->table);
+   kfree(a);
+}
+
+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+                                             enum dma_data_direction direction)
+{
+   struct dma_heap_attachment *a = attachment->priv;
+   struct sg_table *table = &a->table;
+   int ret;
+
+   ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+   if (ret)
+   return ERR_PTR(-ENOMEM);
+   return table;
+}
+
+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+  struct sg_table *table,
+  enum dma_data_direction direction)
+{
+   dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+enum dma_data_direction direction)
+{
+   struct cma_heap_buffer *buffer = dmabuf->priv;
+   struct dma_heap_attachment *a;
+
+   if (buffer->vmap_cnt)
+   
