From: Abdiel Janulgue <abdiel.janul...@linux.intel.com>

Add fault handlers that fill in missing pages according to an object's
backing storage: local-memory objects insert PFNs from the LMEM I/O
region, while system-memory objects insert their backing pages. Also
handle the changes needed to revoke mappings per mmap type so that
pages are refaulted through the matching handler.

Signed-off-by: Abdiel Janulgue <abdiel.janul...@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c   |  57 +++++++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h   |   3 +
 drivers/gpu/drm/i915/gem/i915_gem_mman.c   | 159 +++++++++++++++++++--
 drivers/gpu/drm/i915/gem/i915_gem_object.h |   2 +-
 drivers/gpu/drm/i915/i915_gem.c            |   2 +-
 5 files changed, 208 insertions(+), 15 deletions(-)
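
A note on usage: with this series userspace maps an object through the
fake offset handed out for one of the new mmap types, and the handlers
added below fill in the PTEs on first touch. A minimal sketch of the
userspace side (obtaining the fake offset itself, via the mmap-offset
ioctl, is covered by earlier patches in the series; drm_fd, fake_offset
and obj_size are placeholders):

  #include <stddef.h>
  #include <sys/types.h>
  #include <sys/mman.h>
  #include <err.h>

  static void *map_object(int drm_fd, off_t fake_offset, size_t obj_size)
  {
          /* The first access through this mapping faults via
           * i915_gem_fault_cpu()/i915_gem_fault_lmem(), which insert a
           * PTE for each page of the object's backing store. */
          void *ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
                           MAP_SHARED, drm_fd, fake_offset);

          if (ptr == MAP_FAILED)
                  err(1, "mmap");
          return ptr;
  }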

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 8d0251af5dfc..2194e2c3bdcd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -6,6 +6,7 @@
 #include "intel_memory_region.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
 #include "i915_drv.h"
 
 static int lmem_pread(struct drm_i915_gem_object *obj,
@@ -179,6 +180,62 @@ static int lmem_pwrite(struct drm_i915_gem_object *obj,
        return ret;
 }
 
+vm_fault_t i915_gem_fault_lmem(struct vm_fault *vmf)
+{
+       struct vm_area_struct *area = vmf->vma;
+       struct i915_mmap_offset *priv = area->vm_private_data;
+       struct drm_i915_gem_object *obj = priv->obj;
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *i915 = to_i915(dev);
+       unsigned long size = area->vm_end - area->vm_start;
+       bool write = area->vm_flags & VM_WRITE;
+       vm_fault_t vmf_ret;
+       int i, ret;
+
+       /* Sanity check that we allow writing into this object */
+       if (i915_gem_object_is_readonly(obj) && write)
+               return VM_FAULT_SIGBUS;
+
+       ret = i915_gem_object_pin_pages(obj);
+       if (ret)
+               goto err;
+
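+       /* Prefault the whole mapping: insert a PTE for each page of the
+        * object, pointing at its backing store in device local memory. */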
+       for (i = 0; i < size >> PAGE_SHIFT; i++) {
+               vmf_ret = vmf_insert_pfn(area,
+                                        area->vm_start + i * PAGE_SIZE,
+                                        i915_gem_object_lmem_io_offset(obj, i) >> PAGE_SHIFT);
+               if (vmf_ret & VM_FAULT_ERROR) {
+                       ret = vm_fault_to_errno(vmf_ret, 0);
+                       break;
+               }
+       }
+
+       i915_gem_object_unpin_pages(obj);
+err:
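+       /* Translate the errno from pinning/insertion into a vm_fault_t. */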
+       switch (ret) {
+       case -EIO:
+               if (!intel_gt_is_wedged(&i915->gt))
+                       return VM_FAULT_SIGBUS;
+               /* fallthrough */
+       case -EAGAIN:
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+       case -EBUSY:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       case -ENOSPC:
+       case -EFAULT:
+               return VM_FAULT_SIGBUS;
+       default:
+               WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+               return VM_FAULT_SIGBUS;
+       }
+}
 
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
        .flags = I915_GEM_OBJECT_IS_MAPPABLE,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 43e6e715eeed..c3255eb6daa5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -7,6 +7,7 @@
 #define __I915_GEM_LMEM_H
 
 #include <linux/types.h>
+#include <linux/mman.h>
 
 struct drm_i915_private;
 struct drm_i915_gem_object;
@@ -24,6 +25,8 @@ i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
 resource_size_t i915_gem_object_lmem_io_offset(struct drm_i915_gem_object *obj,
                                               unsigned long n);
 
+vm_fault_t i915_gem_fault_lmem(struct vm_fault *vmf);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index a62657a1f011..304ea578fd30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/mman.h>
+#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include "gt/intel_gt.h"
@@ -12,6 +13,7 @@
 #include "i915_drv.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_ioctls.h"
+#include "i915_gem_lmem.h"
 #include "i915_gem_object.h"
 #include "i915_trace.h"
 #include "i915_vma.h"
@@ -371,7 +373,63 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
        }
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static vm_fault_t i915_gem_fault_cpu(struct vm_fault *vmf)
+{
+       struct vm_area_struct *area = vmf->vma;
+       struct i915_mmap_offset *priv = area->vm_private_data;
+       struct drm_i915_gem_object *obj = priv->obj;
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       vm_fault_t vmf_ret;
+       unsigned long size = area->vm_end - area->vm_start;
+       bool write = area->vm_flags & VM_WRITE;
+       int i, ret;
+
+       /* Sanity check that we allow writing into this object */
+       if (i915_gem_object_is_readonly(obj) && write)
+               return VM_FAULT_SIGBUS;
+
+       ret = i915_gem_object_pin_pages(obj);
+       if (ret)
+               goto err;
+
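+       /* Insert a PTE for each struct page backing the object. */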
+       for (i = 0; i < size >> PAGE_SHIFT; i++) {
+               struct page *page = i915_gem_object_get_page(obj, i);
+               vmf_ret = vmf_insert_pfn(area,
+                                        area->vm_start + i * PAGE_SIZE,
+                                        page_to_pfn(page));
+               if (vmf_ret & VM_FAULT_ERROR) {
+                       ret = vm_fault_to_errno(vmf_ret, 0);
+                       break;
+               }
+       }
+
+       i915_gem_object_unpin_pages(obj);
+err:
+       switch (ret) {
+       case -EIO:
+               if (!intel_gt_is_wedged(&dev_priv->gt))
+                       return VM_FAULT_SIGBUS;
+               /* fallthrough */
+       case -EAGAIN:
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+       case -EBUSY:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       case -ENOSPC:
+       case -EFAULT:
+               return VM_FAULT_SIGBUS;
+       default:
+               WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
        struct i915_mmap_offset *mmo;
@@ -380,21 +437,20 @@ void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 
        obj->userfault_count = 0;
        list_del(&obj->userfault_link);
-       list_for_each_entry(mmo, &obj->mmap_offsets, offset)
-               drm_vma_node_unmap(&mmo->vma_node,
-                                  obj->base.dev->anon_inode->i_mapping);
+
+       mutex_lock(&obj->mmo_lock);
+       list_for_each_entry(mmo, &obj->mmap_offsets, offset) {
+               if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
+                       drm_vma_node_unmap(&mmo->vma_node,
+                                          obj->base.dev->anon_inode->i_mapping);
+       }
+       mutex_unlock(&obj->mmo_lock);
 
        for_each_ggtt_vma(vma, obj)
                i915_vma_unset_userfault(vma);
 }
 
 /**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
  * It is vital that we remove the page mapping if we have mapped a tiled
  * object through the GTT and then lose the fence register due to
  * resource pressure. Similarly if the object has been moved out of the
@@ -402,7 +458,7 @@ void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
  * mapping will then trigger a page fault on the next user access, allowing
  * fixup by i915_gem_fault().
  */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        intel_wakeref_t wakeref;
@@ -421,7 +477,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
        if (!obj->userfault_count)
                goto out;
 
-       __i915_gem_object_release_mmap(obj);
+       __i915_gem_object_release_mmap_gtt(obj);
 
        /* Ensure that the CPU's PTE are revoked and there are not outstanding
         * memory transactions from userspace before we return. The TLB
@@ -436,6 +492,35 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
+static void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+       struct i915_mmap_offset *mmo;
+
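+       /* Zap the PTEs of all non-GTT mmaps so the next access refaults. */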
+       mutex_lock(&obj->mmo_lock);
+       list_for_each_entry(mmo, &obj->mmap_offsets, offset) {
+               if (mmo->mmap_type == I915_MMAP_TYPE_OFFSET_WC ||
+                   mmo->mmap_type == I915_MMAP_TYPE_OFFSET_WB ||
+                   mmo->mmap_type == I915_MMAP_TYPE_OFFSET_UC)
+                       drm_vma_node_unmap(&mmo->vma_node,
+                                          obj->base.dev->anon_inode->i_mapping);
+       }
+       mutex_unlock(&obj->mmo_lock);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_release_mmap_gtt(obj);
+       i915_gem_object_release_mmap_offset(obj);
+}
+
 static void init_mmap_offset(struct drm_i915_gem_object *obj,
                             struct i915_mmap_offset *mmo)
 {
@@ -614,6 +698,43 @@ static const struct vm_operations_struct i915_gem_gtt_vm_ops = {
        .close = i915_gem_vm_close,
 };
 
+static const struct vm_operations_struct i915_gem_cpu_vm_ops = {
+       .fault = i915_gem_fault_cpu,
+       .open = i915_gem_vm_open,
+       .close = i915_gem_vm_close,
+};
+
+static const struct vm_operations_struct i915_gem_lmem_vm_ops = {
+       .fault = i915_gem_fault_lmem,
+       .open = i915_gem_vm_open,
+       .close = i915_gem_vm_close,
+};
+
+static void set_vmdata_mmap_offset(struct i915_mmap_offset *mmo, struct vm_area_struct *vma)
+{
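+       /* Pick PTE caching attributes matching the requested mmap type. */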
+       switch (mmo->mmap_type) {
+       case I915_MMAP_TYPE_OFFSET_WC:
+               vma->vm_page_prot =
+                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+               break;
+       case I915_MMAP_TYPE_OFFSET_WB:
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+               break;
+       case I915_MMAP_TYPE_OFFSET_UC:
+               vma->vm_page_prot =
+                       pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+               break;
+       default:
+               break;
+       }
+
+       if (i915_gem_object_is_lmem(mmo->obj))
+               vma->vm_ops = &i915_gem_lmem_vm_ops;
+       else
+               vma->vm_ops = &i915_gem_cpu_vm_ops;
+}
+
 /* This overcomes the limitation in drm_gem_mmap's assignment of a
  * drm_gem_object as the vma->vm_private_data. Since we need to
  * be able to resolve multiple mmap offsets which could be tied
@@ -677,7 +797,17 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        vma->vm_private_data = mmo;
 
-       vma->vm_ops = &i915_gem_gtt_vm_ops;
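+       /* Route the VMA to the fault handler for its mmap type. */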
+       switch (mmo->mmap_type) {
+       case I915_MMAP_TYPE_OFFSET_WC:
+       case I915_MMAP_TYPE_OFFSET_WB:
+       case I915_MMAP_TYPE_OFFSET_UC:
+               set_vmdata_mmap_offset(mmo, vma);
+               break;
+       case I915_MMAP_TYPE_GTT:
+               vma->vm_ops = &i915_gem_gtt_vm_ops;
+               break;
+       }
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2bb0c779c850..fd58b9aea180 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -350,7 +350,7 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
        i915_gem_object_unpin_pages(obj);
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af63d1a0af14..5a9bd94b6760 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -871,7 +871,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 
        list_for_each_entry_safe(obj, on,
                                 &i915->ggtt.userfault_list, userfault_link)
-               __i915_gem_object_release_mmap(obj);
+               __i915_gem_object_release_mmap_gtt(obj);
 
        /*
         * The fence will be lost when the device powers down. If any were
-- 
2.20.1
