[Intel-gfx] [PATCH] drm/i915: Protect mmapped buffers from casual eviction.

2010-05-11 Thread Chris Wilson
Keep buffers that are in use by the CPU (those that have been mmapped and
moved to the CPU or GTT domain since their last rendering) on a separate
inactive list, so that the first-pass eviction process does not unbind one
of these buffers. Those buffers are still evicted as normal during
evict-everything, so the memory can be recovered under high pressure or a
forced idle.

References:

  Bug 20152 - [G45/GM965 UXA] cannot view JPG in firefox when running UXA
  https://bugs.freedesktop.org/show_bug.cgi?id=20152

  Bug 24369 - Hang when scrolling firefox page with window in front
  https://bugs.freedesktop.org/show_bug.cgi?id=24369

  Bug 15911 - Intermittent X crash (freeze)
  https://bugzilla.kernel.org/show_bug.cgi?id=15911

Signed-off-by: Chris Wilson ch...@chris-wilson.co.uk
Tested-by: Christian von Schultz ker...@vonschultz.se
---
 drivers/gpu/drm/i915/i915_drv.h |   13 +++
 drivers/gpu/drm/i915/i915_gem.c |   71 ++
 2 files changed, 76 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 317c9bf..f99936f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -557,6 +557,19 @@ typedef struct drm_i915_private {
 */
struct list_head inactive_list;
 
+   /**
+* LRU list of objects which are not in the ringbuffer and
+* are ready to unbind, but are still in the GTT and currently
+* mapped and in use by the CPU.
+*
+* last_rendering_seqno is 0 while an object is in this list.
+*
+* A reference is not held on the buffer while on this list,
+* as merely being GTT-bound shouldn't prevent its being
+* freed, and we'll pull it off the list in the free path.
+*/
+   struct list_head mmap_list;
+
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 229354e..9a73b20 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -52,6 +52,7 @@ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev, int min_size);
 static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
+static int i915_gem_evict_from_mmap_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
@@ -1064,6 +1065,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
 
+   if (ret == 0 && obj_priv->gtt_space && !obj_priv->active)
+       list_move_tail(&obj_priv->list, &dev_priv->mm.mmap_list);
+
drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1197,6 +1201,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
 
+   if (!obj_priv->active)
+       list_move_tail(&obj_priv->list, &dev_priv->mm.mmap_list);
+
    pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
 
@@ -2162,7 +2169,8 @@ i915_gem_evict_everything(struct drm_device *dev)
bool lists_empty;
 
    spin_lock(&dev_priv->mm.active_list_lock);
-   lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+   lists_empty = (list_empty(&dev_priv->mm.mmap_list) &&
+                  list_empty(&dev_priv->mm.inactive_list) &&
                   list_empty(&dev_priv->mm.flushing_list) &&
                   list_empty(&dev_priv->mm.active_list));
    spin_unlock(&dev_priv->mm.active_list_lock);
@@ -2177,12 +2185,17 @@ i915_gem_evict_everything(struct drm_device *dev)
 
    BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
+   ret = i915_gem_evict_from_mmap_list(dev);
+   if (ret)
+   return ret;
+
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
 
    spin_lock(&dev_priv->mm.active_list_lock);
-   lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+   lists_empty = (list_empty(&dev_priv->mm.mmap_list) &&
+                  list_empty(&dev_priv->mm.inactive_list) &&
                   list_empty(&dev_priv->mm.flushing_list) &&
                   list_empty(&dev_priv->mm.active_list));
    spin_unlock(&dev_priv->mm.active_list_lock);
@@ -4624,17 +4637,15 @@ void i915_gem_free_object(struct drm_gem_object *obj)
    kfree(obj->driver_private);
 }
 
-/** Unbinds all inactive objects. */
 static 

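The archive truncates the remainder of the patch at this point. For
illustration only, here is a sketch of how the new
i915_gem_evict_from_mmap_list() helper might look, assuming it mirrors the
pre-existing i915_gem_evict_from_inactive_list() loop; only the function
name comes from the hunks above, and the body is a guess rather than the
committed code:

/* Illustrative sketch, not the committed code: unbind every buffer on the
 * mmap list, in the style of the existing
 * i915_gem_evict_from_inactive_list().
 */
static int
i915_gem_evict_from_mmap_list(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        while (!list_empty(&dev_priv->mm.mmap_list)) {
                struct drm_gem_object *obj;

                /* The head of the list is the least recently faulted buffer. */
                obj = list_first_entry(&dev_priv->mm.mmap_list,
                                       struct drm_i915_gem_object,
                                       list)->obj;

                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
                        DRM_ERROR("Error unbinding object: %d\n", ret);
                        return ret;
                }
        }

        return 0;
}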
Re: [Intel-gfx] [PATCH] drm/i915: Protect mmapped buffers from casual eviction.

2010-05-11 Thread Eric Anholt
On Tue, 11 May 2010 16:55:27 +0100, Chris Wilson ch...@chris-wilson.co.uk wrote:
> Keep buffers that are in use by the CPU (those that have been mmapped and
> moved to the CPU or GTT domain since their last rendering) on a separate
> inactive list, so that the first-pass eviction process does not unbind one
> of these buffers. Those buffers are still evicted as normal during
> evict-everything, so the memory can be recovered under high pressure or a
> forced idle.
> 
> References:
> 
>   Bug 20152 - [G45/GM965 UXA] cannot view JPG in firefox when running UXA
>   https://bugs.freedesktop.org/show_bug.cgi?id=20152
> 
>   Bug 24369 - Hang when scrolling firefox page with window in front
>   https://bugs.freedesktop.org/show_bug.cgi?id=24369
> 
>   Bug 15911 - Intermittent X crash (freeze)
>   https://bugzilla.kernel.org/show_bug.cgi?id=15911

Couldn't this be handled more easily by, at the points where you would move
to the tail of mmap_list, just moving to the tail of inactive instead? Since
inactive already means obj_priv->gtt_space && !obj_priv->active.




Re: [Intel-gfx] [PATCH] drm/i915: Protect mmapped buffers from casual eviction.

2010-05-11 Thread Chris Wilson
On Tue, 11 May 2010 09:38:36 -0700, Eric Anholt e...@anholt.net wrote:
> Couldn't this be handled more easily by, at the points where you would move
> to the tail of mmap_list, just moving to the tail of inactive instead? Since
> inactive already means obj_priv->gtt_space && !obj_priv->active.

The real issue is that the inactive list is no longer evicted in LRU order,
otherwise just moving to the end of the inactive list would be ideal. In
benchmarks it is faster to evict an appropriately sized object than to
iterate over the inactive list until enough contiguous space has been freed.
The consequence is that the page-fault-of-doom is reintroduced unless some
measure is taken to avoid it. I don't have any figures to suggest what the
average size of the mmap_list will be, but since an object only stays on the
list until it is next used or an evict-everything occurs, the list should
remain quite short. As our drivers improve, the frequency at which we have
to mmap buffers should fall as well...
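
(For illustration, not the driver's actual eviction code: a minimal sketch
of the size-targeted scan described above, with find_suitable_inactive() as
a hypothetical helper name. A scan like this can return an object from deep
in the list and skip the true LRU head, which is why a recently faulted
buffer needs the separate mmap_list.)

/* Hypothetical helper, illustration only: pick the first inactive object
 * whose GTT allocation is large enough, regardless of its LRU position.
 */
static struct drm_i915_gem_object *
find_suitable_inactive(drm_i915_private_t *dev_priv, int min_size)
{
        struct drm_i915_gem_object *obj_priv;

        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
                if (obj_priv->gtt_space &&
                    obj_priv->gtt_space->size >= min_size)
                        return obj_priv; /* may not be the LRU head */
        }

        return NULL;
}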
-ickle

-- 
Chris Wilson, Intel Open Source Technology Centre