Sparse DRM objects will store their backing pages in an xarray, which
avoids the overhead of preallocating a huge struct page pointer array
when only a small subset of the object's page indices might ever be
populated.
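
As an illustration of the idea only (not part of this patch; 'shmem' and
'page_idx' are hypothetical), backing pages can be stored and looked up
on demand through the xarray API instead of indexing a preallocated
pointer array:

	/* Sketch only: back a single sparse index with a page and look it
	 * up again later. Error handling is simplified.
	 */
	struct page *page = alloc_page(GFP_KERNEL);
	int err;

	err = xa_err(xa_store(&shmem->xapages, page_idx, page, GFP_KERNEL));
	if (err)
		__free_page(page);

	/* Lookup needs no preallocated pointer array */
	page = xa_load(&shmem->xapages, page_idx);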

For now, only the definition of a sparse object is provided, as a union
alternative to a 'dense' object; the functions that exploit it will
follow in later commits.
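
A rough usage sketch (hypothetical driver code; 'dev' and 'obj_size' are
placeholders): a driver wanting a sparse object calls the new helper
instead of drm_gem_shmem_create() and gets back an object whose xarray
starts out empty:

	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create_sparse(dev, obj_size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/* No pages are allocated up front for a sparse object */
	WARN_ON(!shmem->sparse || !xa_empty(&shmem->xapages));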

Signed-off-by: Adrián Larumbe <adrian.laru...@collabora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 68 +++++++++++++++++++++++++-
 include/drm/drm_gem_shmem_helper.h     | 23 ++++++++-
 2 files changed, 88 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d99dee67353a..5f75eb1230f6 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -128,6 +128,31 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
 
+/**
+ * drm_gem_shmem_create_sparse - Allocate a sparse object with the given size
+ * @dev: DRM device
+ * @size: Size of the sparse object to allocate
+ *
+ * This function creates a sparse shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create_sparse(struct drm_device *dev, size_t size)
+{
+       struct drm_gem_shmem_object *shmem =
+               __drm_gem_shmem_create(dev, size, false, NULL);
+
+       if (!IS_ERR(shmem)) {
+               shmem->sparse = true;
+               xa_init_flags(&shmem->xapages, XA_FLAGS_ALLOC);
+       }
+
+       return shmem;
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create_sparse);
+
 /**
  * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
  * given mountpoint
@@ -173,8 +198,8 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
                        sg_free_table(shmem->sgt);
                        kfree(shmem->sgt);
                }
-               if (shmem->pages)
-                       drm_gem_shmem_put_pages(shmem);
+
+               drm_gem_shmem_put_pages(shmem);
 
                drm_WARN_ON(obj->dev, shmem->pages_use_count);
 
@@ -196,6 +221,12 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
        if (shmem->pages_use_count++ > 0)
                return 0;
 
+       /* For now, only allow raising the use count of sparse shmem
+        * objects that already have some backing pages.
+        */
+       if (shmem->sparse && xa_empty(&shmem->xapages))
+               return -EINVAL;
+
        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages)) {
                drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
@@ -231,6 +262,14 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
 
        dma_resv_assert_held(shmem->base.resv);
 
+       if (!shmem->sparse) {
+               if (!shmem->pages)
+                       return;
+       } else {
+               /* Not implemented yet */
+               return;
+       }
+
        if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                return;
 
@@ -404,8 +443,15 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
 {
        struct drm_gem_object *obj = &shmem->base;
 
+       if (shmem->sparse) {
+               drm_err(obj->dev, "UM unmapping of sparse shmem objects not implemented\n");
+               return;
+       }
+
        if (drm_gem_is_imported(obj)) {
                dma_buf_vunmap(obj->dma_buf, map);
+       } else if (obj->import_attach) {
+               dma_buf_vunmap(obj->import_attach->dmabuf, map);
        } else {
                dma_resv_assert_held(shmem->base.resv);
 
@@ -541,6 +587,12 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
        struct page *page;
        pgoff_t page_offset;
 
+       /* TODO: Implement UM mapping of sparse shmem objects */
+       if (drm_WARN_ON(obj->dev, shmem->sparse)) {
+               drm_err(obj->dev, "UM mapping of sparse shmem objects not implemented\n");
+               return VM_FAULT_SIGBUS;
+       }
+
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
@@ -566,8 +618,14 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+       /* TODO: Implement UM mapping of sparse shmem objects */
+       if (drm_WARN_ON(obj->dev, shmem->sparse))
+               return;
+
        drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
 
+       drm_WARN_ON(obj->dev, obj->import_attach);
+
        dma_resv_lock(shmem->base.resv, NULL);
 
        /*
@@ -690,6 +748,9 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
+       if (drm_WARN_ON(obj->dev, shmem->sparse))
+               return ERR_PTR(-EINVAL);
+
        drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
 
        return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
@@ -702,6 +763,9 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
        int ret;
        struct sg_table *sgt;
 
+       if (drm_WARN_ON(obj->dev, shmem->sparse))
+               return ERR_PTR(-EINVAL);
+
        if (shmem->sgt)
                return shmem->sgt;
 
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index cef5a6b5a4d6..00e47512b30f 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -6,6 +6,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
@@ -29,7 +30,10 @@ struct drm_gem_shmem_object {
        /**
         * @pages: Page table
         */
-       struct page **pages;
+       union {
+               struct page **pages;
+               struct xarray xapages;
+       };
 
        /**
         * @pages_use_count:
@@ -91,12 +95,18 @@ struct drm_gem_shmem_object {
         * @map_wc: map object write-combined (instead of using shmem defaults).
         */
        bool map_wc : 1;
+
+       /**
+        * @sparse: the object is only partially backed by pages
+        */
+       bool sparse : 1;
 };
 
 #define to_drm_gem_shmem_obj(obj) \
        container_of(obj, struct drm_gem_shmem_object, base)
 
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+struct drm_gem_shmem_object *drm_gem_shmem_create_sparse(struct drm_device *dev, size_t size);
 struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
                                                            size_t size,
                                                            struct vfsmount *gemfs);
@@ -210,6 +220,10 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
 {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+       /* Use the specific sparse shmem get_sg_table function instead */
+       if (WARN_ON(shmem->sparse))
+               return ERR_PTR(-EINVAL);
+
        return drm_gem_shmem_get_sg_table(shmem);
 }
 
@@ -229,6 +243,10 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
 {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+       /* TODO: Implement kernel mapping of sparse shmem objects */
+       if (WARN_ON(shmem->sparse))
+               return -EACCES;
+
        return drm_gem_shmem_vmap(shmem, map);
 }
 
@@ -263,6 +281,9 @@ static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct v
 {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+       if (shmem->sparse)
+               return -EACCES;
+
        return drm_gem_shmem_mmap(shmem, vma);
 }
 
-- 
2.48.1
