Add secure buffer control flow to mtk_drm_gem.

When user space passes the DRM_MTK_GEM_CREATE_ENCRYPTED flag and a size
to create a mtk_drm_gem object, mtk_drm_gem will allocate a DMA buffer
of the requested size from the secure dma-heap and bind it to the
mtk_drm_gem object.

Signed-off-by: Jason-JH.Lin <jason-jh....@mediatek.com>
---
 drivers/gpu/drm/mediatek/mtk_drm_gem.c | 84 +++++++++++++++++++++++++-
 drivers/gpu/drm/mediatek/mtk_drm_gem.h |  4 ++
 2 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c 
b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index bcce723f257d..2064ccd7dde0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -4,6 +4,8 @@
  */
 
 #include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
 #include <drm/mediatek_drm.h>
 
 #include <drm/drm.h>
@@ -55,6 +57,80 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct 
drm_device *dev,
        return mtk_gem_obj;
 }
 
+struct mtk_drm_gem_obj *mtk_drm_gem_create_from_heap(struct drm_device *dev,
+                                                    const char *heap, size_t 
size)
+{
+       struct mtk_drm_private *priv = dev->dev_private;
+       struct mtk_drm_gem_obj *mtk_gem;
+       struct drm_gem_object *obj;
+       struct dma_heap *dma_heap;
+       struct dma_buf *dma_buf;
+       struct dma_buf_attachment *attach;
+       struct sg_table *sgt;
+       struct iosys_map map = {};
+       int ret;
+
+       mtk_gem = mtk_drm_gem_init(dev, size);
+       if (IS_ERR(mtk_gem))
+               return ERR_CAST(mtk_gem);
+
+       obj = &mtk_gem->base;
+
+       dma_heap = dma_heap_find(heap);
+       if (!dma_heap) {
+               DRM_ERROR("heap find fail\n");
+               goto err_gem_free;
+       }
+       dma_buf = dma_heap_buffer_alloc(dma_heap, size,
+                                       O_RDWR | O_CLOEXEC, 
DMA_HEAP_VALID_HEAP_FLAGS);
+       if (IS_ERR(dma_buf)) {
+               DRM_ERROR("buffer alloc fail\n");
+               dma_heap_put(dma_heap);
+               goto err_gem_free;
+       }
+       dma_heap_put(dma_heap);
+
+       attach = dma_buf_attach(dma_buf, priv->dma_dev);
+       if (IS_ERR(attach)) {
+               DRM_ERROR("attach fail, return\n");
+               dma_buf_put(dma_buf);
+               goto err_gem_free;
+       }
+       sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(sgt)) {
+               DRM_ERROR("map failed, detach and return\n");
+               dma_buf_detach(dma_buf, attach);
+               dma_buf_put(dma_buf);
+               goto err_gem_free;
+       }
+       obj->import_attach = attach;
+       mtk_gem->dma_addr = sg_dma_address(sgt->sgl);
+       mtk_gem->sg = sgt;
+       mtk_gem->size = dma_buf->size;
+
+       if (!strcmp(heap, "mtk_svp") || !strcmp(heap, "mtk_svp_cma")) {
+               /* secure buffer can not be mapped */
+               mtk_gem->sec = true;
+       } else {
+               ret = dma_buf_vmap(dma_buf, &map);
+               mtk_gem->kvaddr = map.vaddr;
+               if (ret) {
+                       DRM_ERROR("map failed, ret=%d\n", ret);
+                       dma_buf_unmap_attachment(attach, sgt, 
DMA_BIDIRECTIONAL);
+                       dma_buf_detach(dma_buf, attach);
+                       dma_buf_put(dma_buf);
+                       mtk_gem->kvaddr = NULL;
+               }
+       }
+
+       return mtk_gem;
+
+err_gem_free:
+       drm_gem_object_release(obj);
+       kfree(mtk_gem);
+       return ERR_PTR(-ENOMEM);
+}
+
 struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
                                           size_t size, bool alloc_kmap)
 {
@@ -218,7 +294,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct 
drm_device *dev,
        if (IS_ERR(mtk_gem))
                return ERR_CAST(mtk_gem);
 
+       mtk_gem->sec = !sg_page(sg->sgl);
        mtk_gem->dma_addr = sg_dma_address(sg->sgl);
+       mtk_gem->size = attach->dmabuf->size;
        mtk_gem->sg = sg;
 
        return &mtk_gem->base;
@@ -290,7 +368,11 @@ int mtk_gem_create_ioctl(struct drm_device *dev, void 
*data,
        struct drm_mtk_gem_create *args = data;
        int ret;
 
-       mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+       if (args->flags & DRM_MTK_GEM_CREATE_ENCRYPTED)
+               mtk_gem = mtk_drm_gem_create_from_heap(dev, "mtk_svp_cma", 
args->size);
+       else
+               mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+
        if (IS_ERR(mtk_gem))
                return PTR_ERR(mtk_gem);
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h 
b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 90f3d2916ec5..ed4d23e252e9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -27,9 +27,11 @@ struct mtk_drm_gem_obj {
        void                    *cookie;
        void                    *kvaddr;
        dma_addr_t              dma_addr;
+       size_t                  size;
        unsigned long           dma_attrs;
        struct sg_table         *sg;
        struct page             **pages;
+       bool                    sec;
 };
 
 #define to_mtk_gem_obj(x)      container_of(x, struct mtk_drm_gem_obj, base)
@@ -37,6 +39,8 @@ struct mtk_drm_gem_obj {
 void mtk_drm_gem_free_object(struct drm_gem_object *gem);
 struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size,
                                           bool alloc_kmap);
+struct mtk_drm_gem_obj *mtk_drm_gem_create_from_heap(struct drm_device *dev,
+                                                    const char *heap, size_t 
size);
 int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
 struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
-- 
2.18.0

Reply via email to