Create a separate "tmpfs" kernel mount for V3D. This allows us to
move away from the shmemfs `shm_mnt` and gives us the flexibility to
set our own mount options. Here, the interest is in "huge=", which
lets us enable the use of THP for our shmem-backed objects.

Signed-off-by: Maíra Canal <mca...@igalia.com>
Reviewed-by: Iago Toral Quiroga <ito...@igalia.com>
---
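Note (not part of the patch): below is a rough sketch of how an object
allocation path could consume the new mount once it is in place. The
v3d_example_shmem_file() helper, the object name string and the use of
VM_NORESERVE are assumptions for illustration only; the actual GEM wiring
is done separately. shmem_file_setup_with_mnt() is the existing shmem
helper that accepts a driver-private mount instead of the global shm_mnt.

    /*
     * Illustrative sketch only -- hypothetical helper, not part of this
     * patch. Shows how shmem-backed pages could be taken from the
     * driver-private tmpfs mount when it was created successfully.
     */
    #include <linux/shmem_fs.h>

    static struct file *v3d_example_shmem_file(struct v3d_dev *v3d, size_t size)
    {
            /*
             * If the private mount was set up with "huge=within_size",
             * shmem may back this file with transparent huge pages;
             * otherwise fall back to the default shmem mount.
             */
            if (v3d->gemfs)
                    return shmem_file_setup_with_mnt(v3d->gemfs,
                                                     "v3d shmem object",
                                                     size, VM_NORESERVE);

            return shmem_file_setup("v3d shmem object", size, VM_NORESERVE);
    }

With "huge=within_size", shmem only allocates a huge page when it fits
entirely within the object's size, so small BOs don't waste memory.
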
 drivers/gpu/drm/v3d/Makefile    |  3 ++-
 drivers/gpu/drm/v3d/v3d_drv.h   |  9 +++++++
 drivers/gpu/drm/v3d/v3d_gem.c   |  3 +++
 drivers/gpu/drm/v3d/v3d_gemfs.c | 46 +++++++++++++++++++++++++++++++++
 4 files changed, 60 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/v3d/v3d_gemfs.c

diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile
index b7d673f1153b..fcf710926057 100644
--- a/drivers/gpu/drm/v3d/Makefile
+++ b/drivers/gpu/drm/v3d/Makefile
@@ -13,7 +13,8 @@ v3d-y := \
        v3d_trace_points.o \
        v3d_sched.o \
        v3d_sysfs.o \
-       v3d_submit.o
+       v3d_submit.o \
+       v3d_gemfs.o
 
 v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o
 
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 1950c723dde1..d2ce8222771a 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -119,6 +119,11 @@ struct v3d_dev {
        struct drm_mm mm;
        spinlock_t mm_lock;
 
+       /*
+        * tmpfs instance used for shmem backed objects
+        */
+       struct vfsmount *gemfs;
+
        struct work_struct overflow_mem_work;
 
        struct v3d_bin_job *bin_job;
@@ -519,6 +524,10 @@ void v3d_reset(struct v3d_dev *v3d);
 void v3d_invalidate_caches(struct v3d_dev *v3d);
 void v3d_clean_caches(struct v3d_dev *v3d);
 
+/* v3d_gemfs.c */
+void v3d_gemfs_init(struct v3d_dev *v3d);
+void v3d_gemfs_fini(struct v3d_dev *v3d);
+
 /* v3d_submit.c */
 void v3d_job_cleanup(struct v3d_job *job);
 void v3d_job_put(struct v3d_job *job);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 66f4b78a6b2e..faefbe497e8d 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -287,6 +287,8 @@ v3d_gem_init(struct drm_device *dev)
        v3d_init_hw_state(v3d);
        v3d_mmu_set_page_table(v3d);
 
+       v3d_gemfs_init(v3d);
+
        ret = v3d_sched_init(v3d);
        if (ret) {
                drm_mm_takedown(&v3d->mm);
@@ -304,6 +306,7 @@ v3d_gem_destroy(struct drm_device *dev)
        struct v3d_dev *v3d = to_v3d_dev(dev);
 
        v3d_sched_fini(v3d);
+       v3d_gemfs_fini(v3d);
 
        /* Waiting for jobs to finish would need to be done before
         * unregistering V3D.
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c
new file mode 100644
index 000000000000..31cf5bd11e39
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_gemfs.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2024 Raspberry Pi */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+
+#include "v3d_drv.h"
+
+void v3d_gemfs_init(struct v3d_dev *v3d)
+{
+       char huge_opt[] = "huge=within_size";
+       struct file_system_type *type;
+       struct vfsmount *gemfs;
+
+       /*
+        * By creating our own shmemfs mountpoint, we can pass in
+        * mount flags that better match our usecase. However, we
+        * only do so on platforms which benefit from it.
+        */
+       if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+               goto err;
+
+       type = get_fs_type("tmpfs");
+       if (!type)
+               goto err;
+
+       gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
+       if (IS_ERR(gemfs))
+               goto err;
+
+       v3d->gemfs = gemfs;
+       drm_info(&v3d->drm, "Using Transparent Hugepages\n");
+
+       return;
+
+err:
+       v3d->gemfs = NULL;
+       drm_notice(&v3d->drm,
+                  "Transparent Hugepage support is recommended for optimal performance on this platform!\n");
+}
+
+void v3d_gemfs_fini(struct v3d_dev *v3d)
+{
+       if (v3d->gemfs)
+               kern_unmount(v3d->gemfs);
+}
-- 
2.44.0
