Create a new MediaTek CMA heap backed by the CMA reserved buffer.

In this heap, when a buffer is allocated for the first time, use cma_alloc
to claim the whole CMA range, then pass that range to the TEE to protect
and manage. For subsequent allocations, just increase cma_used_size.

When SVP is done, cma_release will release the whole range, and the kernel
may reuse it.
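
For reference, the lifecycle above can be exercised from userspace through
the standard DMA-BUF heap uAPI. A minimal sketch (illustrative only, not
part of this patch; error handling is trimmed and the helper name is made
up):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/dma-heap.h>

	/* Allocate one restricted buffer. The first allocation makes the
	 * heap cma_alloc() the whole range and ask the TEE to protect it.
	 */
	static int restricted_buf_alloc(size_t len)
	{
		struct dma_heap_allocation_data data = {
			.len = len,
			.fd_flags = O_RDWR | O_CLOEXEC,
		};
		int heap_fd, ret;

		heap_fd = open("/dev/dma_heap/restricted_mtk_cma",
			       O_RDONLY | O_CLOEXEC);
		if (heap_fd < 0)
			return -1;
		ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
		close(heap_fd);
		if (ret)
			return -1;
		/* Closing data.fd frees the buffer; once cma_used_size
		 * drops to zero, the whole range is cma_release()d.
		 */
		return data.fd;
	}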

For the "CMA" restricted heap, "struct cma *cma" is a common property,
not just for MediaTek, so put it into "struct restricted_heap" instead of
our private data.

Signed-off-by: Yong Wu <yong...@mediatek.com>
---
 drivers/dma-buf/heaps/Kconfig               |   2 +-
 drivers/dma-buf/heaps/restricted_heap.h     |   4 +
 drivers/dma-buf/heaps/restricted_heap_mtk.c | 120 +++++++++++++++++++-
 3 files changed, 123 insertions(+), 3 deletions(-)

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index 84f748fb2856..58903bc62ac8 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -24,7 +24,7 @@ config DMABUF_HEAPS_RESTRICTED
 
 config DMABUF_HEAPS_RESTRICTED_MTK
        bool "MediaTek DMA-BUF Restricted Heap"
-       depends on DMABUF_HEAPS_RESTRICTED && TEE=y
+       depends on DMABUF_HEAPS_RESTRICTED && DMA_CMA && TEE=y
        help
          Enable restricted dma-buf heaps for MediaTek platform. This heap is backed by
          TEE client interfaces. If in doubt, say N.
diff --git a/drivers/dma-buf/heaps/restricted_heap.h b/drivers/dma-buf/heaps/restricted_heap.h
index 6c93f6d257dc..8d17c39b65de 100644
--- a/drivers/dma-buf/heaps/restricted_heap.h
+++ b/drivers/dma-buf/heaps/restricted_heap.h
@@ -21,6 +21,10 @@ struct restricted_heap {
 
        const struct restricted_heap_ops *ops;
 
+       struct cma              *cma;
+       unsigned long           cma_paddr;
+       unsigned long           cma_size;
+
        void                    *priv_data;
 };
 
diff --git a/drivers/dma-buf/heaps/restricted_heap_mtk.c b/drivers/dma-buf/heaps/restricted_heap_mtk.c
index 902add95bb7e..0180d04076e2 100644
--- a/drivers/dma-buf/heaps/restricted_heap_mtk.c
+++ b/drivers/dma-buf/heaps/restricted_heap_mtk.c
@@ -4,9 +4,11 @@
  *
  * Copyright (C) 2024 MediaTek Inc.
  */
+#include <linux/cma.h>
 #include <linux/dma-buf.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/uuid.h>
@@ -23,6 +25,13 @@ enum mtk_secure_mem_type {
         * management is inside the TEE.
         */
        MTK_SECURE_MEMORY_TYPE_CM_TZ    = 1,
+       /*
+        * MediaTek dynamic chunk memory carved out from CMA.
+        * In the normal case, the kernel can use this CMA; when SVP starts, we
+        * allocate the whole CMA and pass its PA and size into the TEE to
+        * protect it, then the detailed memory management is also inside the TEE.
+        */
+       MTK_SECURE_MEMORY_TYPE_CM_CMA   = 2,
 };
 
 enum mtk_secure_buffer_tee_cmd {
@@ -32,6 +41,8 @@ enum mtk_secure_buffer_tee_cmd {
         * [in]  value[0].a: The buffer size.
         *       value[0].b: alignment.
         * [in]  value[1].a: enum mtk_secure_mem_type.
+        * [in]  value[2].a: The CMA PA base in the CMA case.
+        *       value[2].b: The CMA size in the CMA case.
         * [out] value[3].a: The secure handle.
         */
        MTK_TZCMD_SECMEM_ZALLOC = 0x10000, /* MTK TEE Command ID Base */
@@ -52,6 +63,9 @@ struct mtk_restricted_heap_data {
 
        const enum mtk_secure_mem_type mem_type;
 
+       struct page             *cma_page;
+       unsigned long           cma_used_size;
+       struct mutex            lock; /* lock for cma_used_size */
 };
 
 static int mtk_tee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
@@ -125,6 +139,10 @@ static int mtk_tee_restrict_memory(struct restricted_heap *heap, struct restrict
        params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
        params[1].u.value.a = data->mem_type;
        params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+       if (heap->cma && data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA) {
+               params[2].u.value.a = heap->cma_paddr;
+               params[2].u.value.b = heap->cma_size;
+       }
        params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
        ret = mtk_tee_service_call(data->tee_ctx, data->tee_session,
                                   MTK_TZCMD_SECMEM_ZALLOC, params);
@@ -163,6 +181,48 @@ mtk_restricted_memory_free(struct restricted_heap *heap, struct restricted_buffe
 {
 }
 
+static int mtk_restricted_memory_cma_allocate(struct restricted_heap *heap,
+                                             struct restricted_buffer *buf)
+{
+       struct mtk_restricted_heap_data *data = heap->priv_data;
+       int ret = 0;
+       /*
+        * Allocate the CMA only when allocating a buffer for the first time;
+        * for later allocations, just increase cma_used_size. The actual
+        * memory allocation is managed within the TEE.
+        */
+       mutex_lock(&data->lock);
+       if (!data->cma_used_size) {
+               data->cma_page = cma_alloc(heap->cma, heap->cma_size >> PAGE_SHIFT,
+                                          get_order(PAGE_SIZE), false);
+               if (!data->cma_page) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+       } else if (data->cma_used_size + buf->size > heap->cma_size) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       data->cma_used_size += buf->size;
+
+out_unlock:
+       mutex_unlock(&data->lock);
+       return ret;
+}
+
+static void mtk_restricted_memory_cma_free(struct restricted_heap *heap,
+                                          struct restricted_buffer *buf)
+{
+       struct mtk_restricted_heap_data *data = heap->priv_data;
+
+       mutex_lock(&data->lock);
+       data->cma_used_size -= buf->size;
+       if (!data->cma_used_size)
+               cma_release(heap->cma, data->cma_page,
+                           heap->cma_size >> PAGE_SHIFT);
+       mutex_unlock(&data->lock);
+}
+
 static int mtk_restricted_heap_init(struct restricted_heap *heap)
 {
        struct mtk_restricted_heap_data *data = heap->priv_data;
@@ -184,21 +244,77 @@ static struct mtk_restricted_heap_data mtk_restricted_heap_data = {
        .mem_type               = MTK_SECURE_MEMORY_TYPE_CM_TZ,
 };
 
+static const struct restricted_heap_ops mtk_restricted_heap_ops_cma = {
+       .heap_init              = mtk_restricted_heap_init,
+       .memory_alloc           = mtk_restricted_memory_cma_allocate,
+       .memory_free            = mtk_restricted_memory_cma_free,
+       .memory_restrict        = mtk_tee_restrict_memory,
+       .memory_unrestrict      = mtk_tee_unrestrict_memory,
+};
+
+static struct mtk_restricted_heap_data mtk_restricted_heap_data_cma = {
+       .mem_type               = MTK_SECURE_MEMORY_TYPE_CM_CMA,
+};
+
 static struct restricted_heap mtk_restricted_heaps[] = {
        {
                .name           = "restricted_mtk_cm",
                .ops            = &mtk_restricted_heap_ops,
                .priv_data      = &mtk_restricted_heap_data,
        },
+       {
+               .name           = "restricted_mtk_cma",
+               .ops            = &mtk_restricted_heap_ops_cma,
+               .priv_data      = &mtk_restricted_heap_data_cma,
+       },
 };
 
+static int __init mtk_restricted_cma_init(struct reserved_mem *rmem)
+{
+       struct restricted_heap *rstrd_heap = mtk_restricted_heaps, *rstrd_heap_cma = NULL;
+       struct mtk_restricted_heap_data *data;
+       struct cma *cma;
+       int ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(mtk_restricted_heaps); i++, rstrd_heap++) {
+               data = rstrd_heap->priv_data;
+               if (data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA) {
+                       rstrd_heap_cma = rstrd_heap;
+                       break;
+               }
+       }
+       if (!rstrd_heap_cma)
+               return -EINVAL;
+
+       ret = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name,
+                                   &cma);
+       if (ret) {
+               pr_err("%s: %s failed to set up CMA\n", __func__, rmem->name);
+               return ret;
+       }
+
+       rstrd_heap_cma->cma = cma;
+       rstrd_heap_cma->cma_paddr = rmem->base;
+       rstrd_heap_cma->cma_size = rmem->size;
+       return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(restricted_cma, "mediatek,dynamic-restricted-region",
+                      mtk_restricted_cma_init);
+
 static int mtk_restricted_heap_initialize(void)
 {
        struct restricted_heap *rstrd_heap = mtk_restricted_heaps;
+       struct mtk_restricted_heap_data *data;
        unsigned int i;
 
-       for (i = 0; i < ARRAY_SIZE(mtk_restricted_heaps); i++, rstrd_heap++)
-               restricted_heap_add(rstrd_heap);
+       for (i = 0; i < ARRAY_SIZE(mtk_restricted_heaps); i++, rstrd_heap++) {
+               data = rstrd_heap->priv_data;
+               if (data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA && !rstrd_heap->cma)
+                       continue;
+               if (!restricted_heap_add(rstrd_heap))
+                       mutex_init(&data->lock);
+       }
        return 0;
 }
 module_init(mtk_restricted_heap_initialize);
-- 
2.25.1
