This gives the mm subsystem the ability to improve fault handling
performance by asking the fault handler to insert a range of pages
around the faulting address in a single batch.

v4:
- Implement map_pages instead of huge_fault

Signed-off-by: Loïc Molinari <[email protected]>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 53 +++++++++++++++++++++++---
 1 file changed, 48 insertions(+), 5 deletions(-)
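
For reference, a minimal userspace sketch (not part of the patch) that
exercises this path. It assumes a primary node at /dev/dri/card0 backed by a
driver that uses the shmem GEM helpers for dumb buffers; the buffer
dimensions are arbitrary. It creates and maps a dumb buffer, then touches one
byte per page so that the resulting faults can be served through the new
map_pages handler, with the batch size left to the mm core's fault-around
logic.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	struct drm_mode_create_dumb create = {
		.width = 1024, .height = 1024, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	long page_size = sysconf(_SC_PAGESIZE);
	uint8_t *ptr;
	uint64_t i;
	int fd;

	/* Any primary node whose driver uses the shmem GEM helpers. */
	fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0) {
		perror("DRM_IOCTL_MODE_CREATE_DUMB");
		return 1;
	}

	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0) {
		perror("DRM_IOCTL_MODE_MAP_DUMB");
		return 1;
	}

	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Touch one byte per page; each fault may now populate a batch of
	 * pages around the faulting address instead of a single one.
	 */
	for (i = 0; i < create.size; i += page_size)
		ptr[i] = 0xff;

	munmap(ptr, create.size);
	close(fd);
	return 0;
}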

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index be89be1c804c..e151262332f9 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -567,24 +567,66 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
-static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+static bool drm_gem_shmem_fault_is_valid(struct drm_gem_object *obj,
+                                        pgoff_t pgoff)
+{
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+       if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
+           pgoff >= (obj->size >> PAGE_SHIFT) ||
+           shmem->madv < 0)
+               return false;
+
+       return true;
+}
+
+static vm_fault_t drm_gem_shmem_map_pages(struct vm_fault *vmf,
+                                         pgoff_t start_pgoff,
+                                         pgoff_t end_pgoff)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-       loff_t num_pages = obj->size >> PAGE_SHIFT;
+       unsigned long addr, pfn;
        vm_fault_t ret;
+
+       start_pgoff -= vma->vm_pgoff;
+       end_pgoff -= vma->vm_pgoff;
+       addr = vma->vm_start + (start_pgoff << PAGE_SHIFT);
+
+       dma_resv_lock(shmem->base.resv, NULL);
+
+       if (unlikely(!drm_gem_shmem_fault_is_valid(obj, start_pgoff))) {
+               ret = VM_FAULT_SIGBUS;
+       } else {
+               /* Map a range of pages around the faulty address. */
+               do {
+                       pfn = page_to_pfn(shmem->pages[start_pgoff]);
+                       ret = vmf_insert_pfn(vma, addr, pfn);
+                       addr += PAGE_SIZE;
+               } while (++start_pgoff <= end_pgoff && ret == VM_FAULT_NOPAGE);
+       }
+
+       dma_resv_unlock(shmem->base.resv);
+
+       return ret;
+}
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        struct page *page;
        pgoff_t page_offset;
+       vm_fault_t ret;
 
        /* Offset to faulty address in the VMA (without the fake offset). */
        page_offset = vmf->pgoff - vma->vm_pgoff;
 
        dma_resv_lock(shmem->base.resv, NULL);
 
-       if (page_offset >= num_pages ||
-           drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
-           shmem->madv < 0) {
+       if (unlikely(!drm_gem_shmem_fault_is_valid(obj, page_offset))) {
                ret = VM_FAULT_SIGBUS;
        } else {
                page = shmem->pages[page_offset];
@@ -632,6 +674,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 }
 
 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+       .map_pages = drm_gem_shmem_map_pages,
        .fault = drm_gem_shmem_fault,
        .open = drm_gem_shmem_vm_open,
        .close = drm_gem_shmem_vm_close,
-- 
2.47.3
