From: Rob Herring <[email protected]>

Add support in DMA helpers to handle callers specifying
DRM_MODE_DUMB_KERNEL_MAP flag. Existing behavior is maintained with this
change. drm_gem_dma_dumb_create() always creates a kernel mapping as
before. drm_gem_dma_dumb_create_internal() lets the caller set the flags
as desired.

Most drivers that use drm_gem_dma_dumb_create_internal() are already
using the internal client interface via drm_client_setup*() and
DRM_FBDEV_DMA_DRIVER_OPS, and do not have other direct kernel
accesses to the buffer memory internally. Therefore, there is no need
to add the DRM_MODE_DUMB_KERNEL_MAP flag for them.

The only outlier is the adp driver. The DRM_MODE_DUMB_KERNEL_MAP flag
is added to its dumb_create() function, adp_drm_gem_dumb_create().

Signed-off-by: Rob Herring <[email protected]>
[[email protected]: Rebase onto renamed GEM DMA helpers]
[[email protected]: show "vaddr=(no mapping)" in drm_gem_dma_print_info()]
[[email protected]: Drop DRM_MODE_DUMB_KERNEL_MAP driver changes]
[[email protected]: Add DRM_MODE_DUMB_KERNEL_MAP to adp_drm_gem_dumb_create()]
[[email protected]: Add flags field to drm_gem_dma_create_with_handle()
                     kerneldoc]
Signed-off-by: Chen-Yu Tsai <[email protected]>
---
Changes since v1:
- Rebased onto renamed GEM DMA helpers
- Added check in drm_fb_dma_get_scanout_buffer() and drm_gem_dma_vmap().
- Made drm_gem_dma_print_info() show "vaddr=(no mapping)" for objects
  allocated without kernel mapping
- Dropped existing DRM_MODE_DUMB_KERNEL_MAP flag addition in various
  drivers
- Added DRM_MODE_DUMB_KERNEL_MAP flag to adp_drm_gem_dumb_create()
- Added flags field kerneldoc for drm_gem_dma_create_with_handle()

Cc: Sasha Finkelstein <[email protected]>
Cc: Janne Grunau <[email protected]>
Cc: [email protected]
---
 drivers/gpu/drm/adp/adp_drv.c        |  1 +
 drivers/gpu/drm/drm_fb_dma_helper.c  |  4 ++
 drivers/gpu/drm/drm_gem_dma_helper.c | 67 +++++++++++++++++-----------
 3 files changed, 47 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
index 4554cf75565e..c549b44b3814 100644
--- a/drivers/gpu/drm/adp/adp_drv.c
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -95,6 +95,7 @@ static int adp_drm_gem_dumb_create(struct drm_file *file_priv,
 {
        args->height = ALIGN(args->height, 64);
        args->size = args->pitch * args->height;
+       args->flags = DRM_MODE_DUMB_KERNEL_MAP;
 
        return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
 }
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index fd71969d2fb1..12a44accc48c 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -187,6 +187,10 @@ int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
        if (!dma_obj->vaddr)
                return -ENODEV;
 
+       /* Buffer was allocated with NO_KERNEL_MAPPING */
+       if (dma_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+               return -ENODEV;
+
        iosys_map_set_vaddr(&sb->map[0], dma_obj->vaddr);
        sb->format = fb->format;
        sb->height = fb->height;
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 9722c9fc86f3..281fb563f061 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -116,26 +116,8 @@ __drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
        return ERR_PTR(ret);
 }
 
-/**
- * drm_gem_dma_create - allocate an object with the given size
- * @drm: DRM device
- * @size: size of the object to allocate
- *
- * This function creates a DMA GEM object and allocates memory as backing store.
- * The allocated memory will occupy a contiguous chunk of bus address space.
- *
- * For devices that are directly connected to the memory bus then the allocated
- * memory will be physically contiguous. For devices that access through an
- * IOMMU, then the allocated memory is not expected to be physically contiguous
- * because having contiguous IOVAs is sufficient to meet a devices DMA
- * requirements.
- *
- * Returns:
- * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
- * error code on failure.
- */
-struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
-                                             size_t size)
+static struct drm_gem_dma_object *
+drm_gem_dma_create_flags(struct drm_device *drm, size_t size, u32 flags)
 {
        struct drm_gem_dma_object *dma_obj;
        int ret;
@@ -146,6 +128,9 @@ struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
        if (IS_ERR(dma_obj))
                return dma_obj;
 
+       if (!(flags & DRM_MODE_DUMB_KERNEL_MAP))
+               dma_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+
        if (dma_obj->map_noncoherent) {
                dma_obj->vaddr = dma_alloc_noncoherent(drm_dev_dma_dev(drm),
                                                       size,
@@ -171,6 +156,30 @@ struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
        drm_gem_object_put(&dma_obj->base);
        return ERR_PTR(ret);
 }
+
+/**
+ * drm_gem_dma_create - allocate an object with the given size
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ *
+ * This function creates a DMA GEM object and allocates memory as backing store.
+ * The allocated memory will occupy a contiguous chunk of bus address space.
+ *
+ * For devices that are directly connected to the memory bus then the allocated
+ * memory will be physically contiguous. For devices that access through an
+ * IOMMU, then the allocated memory is not expected to be physically contiguous
+ * because having contiguous IOVAs is sufficient to meet a devices DMA
+ * requirements.
+ *
+ * Returns:
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
+                                             size_t size)
+{
+       return drm_gem_dma_create_flags(drm, size, DRM_MODE_DUMB_KERNEL_MAP);
+}
 EXPORT_SYMBOL_GPL(drm_gem_dma_create);
 
 /**
@@ -179,6 +188,7 @@ EXPORT_SYMBOL_GPL(drm_gem_dma_create);
  * @file_priv: DRM file-private structure to register the handle for
  * @drm: DRM device
  * @size: size of the object to allocate
+ * @flags: DRM_MODE_DUMB_* flags if any
  * @handle: return location for the GEM handle
  *
  * This function creates a DMA GEM object, allocating a chunk of memory as
@@ -194,14 +204,14 @@ EXPORT_SYMBOL_GPL(drm_gem_dma_create);
  */
 static struct drm_gem_dma_object *
 drm_gem_dma_create_with_handle(struct drm_file *file_priv,
-                              struct drm_device *drm, size_t size,
+                              struct drm_device *drm, size_t size, u32 flags,
                               uint32_t *handle)
 {
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *gem_obj;
        int ret;
 
-       dma_obj = drm_gem_dma_create(drm, size);
+       dma_obj = drm_gem_dma_create_flags(drm, size, flags);
        if (IS_ERR(dma_obj))
                return dma_obj;
 
@@ -283,7 +293,7 @@ int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
                args->size = args->pitch * args->height;
 
        dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
-                                                &args->handle);
+                                                args->flags, &args->handle);
        return PTR_ERR_OR_ZERO(dma_obj);
 }
 EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
@@ -313,12 +323,13 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv,
        struct drm_gem_dma_object *dma_obj;
        int ret;
 
+       args->flags = DRM_MODE_DUMB_KERNEL_MAP;
        ret = drm_mode_size_dumb(drm, args, 0, 0);
        if (ret)
                return ret;
 
        dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
-                                                &args->handle);
+                                                args->flags, &args->handle);
        return PTR_ERR_OR_ZERO(dma_obj);
 }
 EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);
@@ -412,7 +423,10 @@ void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
                            struct drm_printer *p, unsigned int indent)
 {
        drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
-       drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
+       if (dma_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+               drm_printf_indent(p, indent, "vaddr=(no mapping)\n");
+       else
+               drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
 }
 EXPORT_SYMBOL(drm_gem_dma_print_info);
 
@@ -511,6 +525,9 @@ EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);
 int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
                     struct iosys_map *map)
 {
+       if (dma_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+               return -ENOMEM;
+
        iosys_map_set_vaddr(map, dma_obj->vaddr);
 
        return 0;
-- 
2.53.0.851.ga537e3e6e9-goog

Reply via email to