Keeping buffers that are in use by the CPU — those that have been mmapped
and moved to the CPU or GTT domain since their last rendering — on a
separate inactive list prevents the first-pass eviction process from
unbinding one of these buffers. Such buffers are still evicted as normal
during evict-everything, so that the memory can be recovered under high
pressure or a forced idle.

References:

  Bug 20152 - [G45/GM965 UXA] cannot view JPG in firefox when running UXA
  https://bugs.freedesktop.org/show_bug.cgi?id=20152

  Bug 24369 - Hang when scrolling firefox page with window in front
  https://bugs.freedesktop.org/show_bug.cgi?id=24369

  Bug 15911 - Intermittent X crash (freeze)
  https://bugzilla.kernel.org/show_bug.cgi?id=15911

Signed-off-by: Chris Wilson <[email protected]>
Tested-by: Christian von Schultz <[email protected]>
---
 drivers/gpu/drm/i915/i915_drv.h |   13 +++++++
 drivers/gpu/drm/i915/i915_gem.c |   71 ++++++++++++++++++++++++++++++++++----
 2 files changed, 76 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 317c9bf..f99936f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -557,6 +557,19 @@ typedef struct drm_i915_private {
                 */
                struct list_head inactive_list;
 
+               /**
+                * LRU list of objects which are not in the ringbuffer and
+                * are ready to unbind, but are still in the GTT and currently
+                * mapped and in use by the CPU.
+                *
+                * last_rendering_seqno is 0 while an object is in this list.
+                *
+                * A reference is not held on the buffer while on this list,
+                * as merely being GTT-bound shouldn't prevent its being
+                * freed, and we'll pull it off the list in the free path.
+                */
+               struct list_head mmap_list;
+
                /** LRU list of objects with fence regs on them. */
                struct list_head fence_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 229354e..9a73b20 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -52,6 +52,7 @@ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev, int min_size);
 static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
+static int i915_gem_evict_from_mmap_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
@@ -1064,6 +1065,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }
 
+       if (ret == 0 && obj_priv->gtt_space && !obj_priv->active)
+           list_move_tail(&obj_priv->list, &dev_priv->mm.mmap_list);
+
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -1197,6 +1201,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                        goto unlock;
        }
 
+       if (!obj_priv->active)
+               list_move_tail(&obj_priv->list, &dev_priv->mm.mmap_list);
+
        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                page_offset;
 
@@ -2162,7 +2169,8 @@ i915_gem_evict_everything(struct drm_device *dev)
        bool lists_empty;
 
        spin_lock(&dev_priv->mm.active_list_lock);
-       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+       lists_empty = (list_empty(&dev_priv->mm.mmap_list) &&
+                      list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->mm.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);
@@ -2177,12 +2185,17 @@ i915_gem_evict_everything(struct drm_device *dev)
 
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
+       ret = i915_gem_evict_from_mmap_list(dev);
+       if (ret)
+               return ret;
+
        ret = i915_gem_evict_from_inactive_list(dev);
        if (ret)
                return ret;
 
        spin_lock(&dev_priv->mm.active_list_lock);
-       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+       lists_empty = (list_empty(&dev_priv->mm.mmap_list) &&
+                      list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->mm.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);
@@ -4624,17 +4637,15 @@ void i915_gem_free_object(struct drm_gem_object *obj)
        kfree(obj->driver_private);
 }
 
-/** Unbinds all inactive objects. */
 static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
+i915_gem_evict_from_list(struct drm_device *dev,
+                        struct list_head *list)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       while (!list_empty(&dev_priv->mm.inactive_list)) {
+       while (!list_empty(list)) {
                struct drm_gem_object *obj;
                int ret;
 
-               obj = list_first_entry(&dev_priv->mm.inactive_list,
+               obj = list_first_entry(list,
                                       struct drm_i915_gem_object,
                                       list)->obj;
 
@@ -4648,6 +4659,23 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
        return 0;
 }
 
+/** Unbinds all inactive objects. */
+static int
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       return i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+}
+
+static int
+i915_gem_evict_from_mmap_list(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       return i915_gem_evict_from_list(dev, &dev_priv->mm.mmap_list);
+}
+
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -4674,6 +4702,12 @@ i915_gem_idle(struct drm_device *dev)
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
+
+               ret = i915_gem_evict_from_mmap_list(dev);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return ret;
+               }
        }
 
        /* Hack!  Don't let anybody do execbuf while we don't control the chip.
@@ -5010,6 +5044,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+       BUG_ON(!list_empty(&dev_priv->mm.mmap_list));
        BUG_ON(!list_empty(&dev_priv->mm.request_list));
        mutex_unlock(&dev->struct_mutex);
 
@@ -5053,6 +5088,7 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.mmap_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -5308,6 +5344,22 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
+i915_move_mmap_onto_inactive_list(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (list_empty(&dev_priv->mm.mmap_list))
+               return 0;
+
+       do {
+               list_move_tail(dev_priv->mm.mmap_list.next,
+                              &dev_priv->mm.inactive_list);
+       } while (!list_empty(&dev_priv->mm.mmap_list));
+
+       return 1;
+}
+
+static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
        drm_i915_private_t *dev_priv, *next_dev;
@@ -5416,6 +5468,9 @@ rescan:
                                active++;
                        }
 
+                       if (i915_move_mmap_onto_inactive_list(dev))
+                               active++;
+
                        spin_lock(&shrink_list_lock);
                        mutex_unlock(&dev->struct_mutex);
                }
-- 
1.7.1

_______________________________________________
Intel-gfx mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to