Re: [Intel-gfx] [PATCH 3/3] drm/i915: Add support for stealing purgeable stolen pages

2015-04-14 Thread shuang . he
Tested-By: Intel Graphics QA PRTS (Patch Regression Test System Contact: 
shuang...@intel.com)
Task id: 6183
-Summary-
Platform  Delta  drm-intel-nightly  Series Applied
PNV  270/270  270/270
ILK  303/303  303/303
SNB -21  304/304  283/304
IVB  337/337  337/337
BYT -1  287/287  286/287
HSW  361/361  361/361
BDW  309/309  309/309
-Detailed-
Platform  Testdrm-intel-nightly  Series 
Applied
 SNB  igt@kms_cursor_crc@cursor-size-change  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@kms_mmio_vs_cs_flip@setcrtc_vs_cs_flip  NSPT(3)PASS(1)  
NSPT(2)
 SNB  igt@kms_mmio_vs_cs_flip@setplane_vs_cs_flip  NSPT(3)PASS(1)  
NSPT(2)
 SNB  igt@kms_rotation_crc@primary-rotation  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@kms_rotation_crc@sprite-rotation  NSPT(3)PASS(3)  NSPT(2)
 SNB  igt@pm_rpm@cursor  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@cursor-dpms  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@dpms-mode-unset-non-lpsp  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@dpms-non-lpsp  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@drm-resources-equal  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@fences  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@fences-dpms  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@gem-execbuf  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@gem-mmap-cpu  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@gem-mmap-gtt  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@gem-pread  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@i2c  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@modeset-non-lpsp  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@modeset-non-lpsp-stress-no-wait  NSPT(3)PASS(1)  
NSPT(2)
 SNB  igt@pm_rpm@pci-d3-state  NSPT(3)PASS(1)  NSPT(2)
 SNB  igt@pm_rpm@rte  NSPT(3)PASS(1)  NSPT(2)
*BYT  igt@gem_exec_bad_domains@conflicting-write-domain  PASS(4)  
FAIL(1)PASS(1)
Note: Pay particular attention to lines that start with '*'
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx


[Intel-gfx] [PATCH 3/3] drm/i915: Add support for stealing purgeable stolen pages

2015-04-11 Thread ankitprasad . r . sharma
From: Chris Wilson chris at chris-wilson.co.uk

If we run out of stolen memory when trying to allocate an object, see if
we can reap enough purgeable objects to free up enough contiguous free
space for the allocation. This is in principle very much like evicting
objects to free up enough contiguous space in the vma when binding
a new object - and you will be forgiven for thinking that the code looks
very similar.

At the moment, we do not allow userspace to allocate objects in stolen,
so there is neither the memory pressure to trigger stolen eviction nor
any purgeable objects inside the stolen arena. However, this will change
in the near future, and so better management and defragmentation of
stolen memory will become a real issue.

v2: Remember to remove the drm_mm_node.

testcase: igt/gem_create_stolen

Signed-off-by: Chris Wilson c...@chris-wilson.co.uk
Cc: Gupta, Sourab sourab.gu...@intel.com
Cc: Goel, Akash akash.g...@intel.com
---
 drivers/gpu/drm/i915/i915_gem_stolen.c | 121 ++---
 1 file changed, 110 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f8da716..0a38d71 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -430,18 +430,29 @@ cleanup:
return NULL;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+static bool mark_free(struct drm_i915_gem_object *obj, struct list_head 
*unwind)
+{
+   if (obj-stolen == NULL)
+   return false;
+
+   if (obj-madv != I915_MADV_DONTNEED)
+   return false;
+
+   if (i915_gem_obj_is_pinned(obj))
+   return false;
+
+   list_add(obj-obj_exec_link, unwind);
+   return drm_mm_scan_add_block(obj-stolen);
+}
+
+static struct drm_mm_node *
+stolen_alloc(struct drm_i915_private *dev_priv, u32 size)
 {
-   struct drm_i915_private *dev_priv = dev-dev_private;
-   struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+   struct drm_i915_gem_object *obj;
+   struct list_head unwind, evict;
int ret;
 
-   if (!drm_mm_initialized(dev_priv-mm.stolen))
-   return NULL;
-
-   DRM_DEBUG_KMS(creating stolen object: size=%x\n, size);
if (size == 0)
return NULL;
 
@@ -451,11 +462,99 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 
size)
 
ret = drm_mm_insert_node(dev_priv-mm.stolen, stolen, size,
 4096, DRM_MM_SEARCH_DEFAULT);
-   if (ret) {
-   kfree(stolen);
-   return NULL;
+   if (ret == 0)
+   return stolen;
+
+   /* No more stolen memory available, or too fragmented.
+* Try evicting purgeable objects and search again.
+*/
+
+   drm_mm_init_scan(dev_priv-mm.stolen, size, 4096, 0);
+   INIT_LIST_HEAD(unwind);
+
+   list_for_each_entry(obj, dev_priv-mm.unbound_list, global_list)
+   if (mark_free(obj, unwind))
+   goto found;
+
+   list_for_each_entry(obj, dev_priv-mm.bound_list, global_list)
+   if (mark_free(obj, unwind))
+   goto found;
+
+found:
+   INIT_LIST_HEAD(evict);
+   while (!list_empty(unwind)) {
+   obj = list_first_entry(unwind,
+  struct drm_i915_gem_object,
+  obj_exec_link);
+   list_del_init(obj-obj_exec_link);
+
+   if (drm_mm_scan_remove_block(obj-stolen)) {
+   list_add(obj-obj_exec_link, evict);
+   drm_gem_object_reference(obj-base);
+   }
}
 
+   ret = 0;
+   while (!list_empty(evict)) {
+   obj = list_first_entry(evict,
+  struct drm_i915_gem_object,
+  obj_exec_link);
+   list_del_init(obj-obj_exec_link);
+
+   if (ret == 0) {
+   struct i915_vma *vma, *vma_next;
+
+   list_for_each_entry_safe(vma, vma_next,
+obj-vma_list,
+vma_link)
+   if (i915_vma_unbind(vma))
+   break;
+
+   /* Stolen pins its pages to prevent the
+* normal shrinker from processing stolen
+* objects.
+*/
+   i915_gem_object_unpin_pages(obj);
+
+   ret = i915_gem_object_put_pages(obj);
+   if (ret == 0) {
+   i915_gem_object_release_stolen(obj);
+   obj-madv = __I915_MADV_PURGED;
+   } else
+   

[Intel-gfx] [PATCH 3/3] drm/i915: Add support for stealing purgeable stolen pages

2014-06-16 Thread sourab . gupta
From: Chris Wilson ch...@chris-wilson.co.uk

If we run out of stolen memory when trying to allocate an object, see if
we can reap enough purgeable objects to free up enough contiguous free
space for the allocation. This is in principle very much like evicting
objects to free up enough contiguous space in the vma when binding
a new object - and you will be forgiven for thinking that the code looks
very similar.

At the moment, we do not allow userspace to allocate objects in stolen,
so there is neither the memory pressure to trigger stolen eviction nor
any purgeable objects inside the stolen arena. However, this will change
in the near future, and so better management and defragmentation of
stolen memory will become a real issue.

v2: Remember to remove the drm_mm_node.

testcase: igt/gem_create2

Signed-off-by: Chris Wilson ch...@chris-wilson.co.uk
Cc: Gupta, Sourab sourab.gu...@intel.com
Cc: Goel, Akash akash.g...@intel.com
---
 drivers/gpu/drm/i915/i915_gem_stolen.c | 121 ++---
 1 file changed, 110 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 6441178..042ae61 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -340,18 +340,29 @@ cleanup:
return NULL;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+static bool mark_free(struct drm_i915_gem_object *obj, struct list_head 
*unwind)
+{
+   if (obj-stolen == NULL)
+   return false;
+
+   if (obj-madv != I915_MADV_DONTNEED)
+   return false;
+
+   if (i915_gem_obj_is_pinned(obj))
+   return false;
+
+   list_add(obj-obj_exec_link, unwind);
+   return drm_mm_scan_add_block(obj-stolen);
+}
+
+static struct drm_mm_node *
+stolen_alloc(struct drm_i915_private *dev_priv, u32 size)
 {
-   struct drm_i915_private *dev_priv = dev-dev_private;
-   struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+   struct drm_i915_gem_object *obj;
+   struct list_head unwind, evict;
int ret;
 
-   if (!drm_mm_initialized(dev_priv-mm.stolen))
-   return NULL;
-
-   DRM_DEBUG_KMS(creating stolen object: size=%x\n, size);
if (size == 0)
return NULL;
 
@@ -361,11 +372,99 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 
size)
 
ret = drm_mm_insert_node(dev_priv-mm.stolen, stolen, size,
 4096, DRM_MM_SEARCH_DEFAULT);
-   if (ret) {
-   kfree(stolen);
-   return NULL;
+   if (ret == 0)
+   return stolen;
+
+   /* No more stolen memory available, or too fragmented.
+* Try evicting purgeable objects and search again.
+*/
+
+   drm_mm_init_scan(dev_priv-mm.stolen, size, 4096, 0);
+   INIT_LIST_HEAD(unwind);
+
+   list_for_each_entry(obj, dev_priv-mm.unbound_list, global_list)
+   if (mark_free(obj, unwind))
+   goto found;
+
+   list_for_each_entry(obj, dev_priv-mm.bound_list, global_list)
+   if (mark_free(obj, unwind))
+   goto found;
+
+found:
+   INIT_LIST_HEAD(evict);
+   while (!list_empty(unwind)) {
+   obj = list_first_entry(unwind,
+  struct drm_i915_gem_object,
+  obj_exec_link);
+   list_del_init(obj-obj_exec_link);
+
+   if (drm_mm_scan_remove_block(obj-stolen)) {
+   list_add(obj-obj_exec_link, evict);
+   drm_gem_object_reference(obj-base);
+   }
}
 
+   ret = 0;
+   while (!list_empty(evict)) {
+   obj = list_first_entry(evict,
+  struct drm_i915_gem_object,
+  obj_exec_link);
+   list_del_init(obj-obj_exec_link);
+
+   if (ret == 0) {
+   struct i915_vma *vma, *vma_next;
+
+   list_for_each_entry_safe(vma, vma_next,
+obj-vma_list,
+vma_link)
+   if (i915_vma_unbind(vma))
+   break;
+
+   /* Stolen pins its pages to prevent the
+* normal shrinker from processing stolen
+* objects.
+*/
+   i915_gem_object_unpin_pages(obj);
+
+   ret = i915_gem_object_put_pages(obj);
+   if (ret == 0) {
+   i915_gem_object_release_stolen(obj);
+   obj-madv = __I915_MADV_PURGED;
+   } else
+   

[Intel-gfx] [PATCH 3/3] drm/i915: Add support for stealing purgeable stolen pages

2014-03-24 Thread Rodrigo Vivi
From: Chris Wilson ch...@chris-wilson.co.uk

If we run out of stolen memory when trying to allocate an object, see if
we can reap enough purgeable objects to free up enough contiguous free
space for the allocation. This is in principle very much like evicting
objects to free up enough contiguous space in the vma when binding
a new object - and you will be forgiven for thinking that the code looks
very similar.

At the moment, we do not allow userspace to allocate objects in stolen,
so there is neither the memory pressure to trigger stolen eviction nor
any purgeable objects inside the stolen arena. However, this will change
in the near future, and so better management and defragmentation of
stolen memory will become a real issue.

Signed-off-by: Chris Wilson ch...@chris-wilson.co.uk
Cc: Gupta, Sourab sourab.gu...@intel.com
Cc: Goel, Akash akash.g...@intel.com
Signed-off-by: Rodrigo Vivi rodrigo.v...@gmail.com
---
 drivers/gpu/drm/i915/i915_gem_stolen.c | 119 ++---
 1 file changed, 108 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 28d24ca..896f342 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -329,18 +329,25 @@ cleanup:
return NULL;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+static bool mark_free(struct drm_i915_gem_object *obj, struct list_head 
*unwind)
+{
+   if (obj-stolen == NULL)
+   return false;
+
+   if (obj-madv != I915_MADV_DONTNEED)
+   return false;
+
+   list_add(obj-obj_exec_link, unwind);
+   return drm_mm_scan_add_block(obj-stolen);
+}
+
+static struct drm_mm_node *stolen_alloc(struct drm_i915_private *dev_priv, u32 
size)
 {
-   struct drm_i915_private *dev_priv = dev-dev_private;
-   struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+   struct drm_i915_gem_object *obj;
+   struct list_head unwind, evict;
int ret;
 
-   if (!drm_mm_initialized(dev_priv-mm.stolen))
-   return NULL;
-
-   DRM_DEBUG_KMS(creating stolen object: size=%x\n, size);
if (size == 0)
return NULL;
 
@@ -350,11 +357,101 @@ i915_gem_object_create_stolen(struct drm_device *dev, 
u32 size)
 
ret = drm_mm_insert_node(dev_priv-mm.stolen, stolen, size,
 4096, DRM_MM_SEARCH_DEFAULT);
-   if (ret) {
-   kfree(stolen);
-   return NULL;
+   if (ret == 0)
+   return stolen;
+
+   /* No more stolen memory available, or too fragmented.
+* Try evicting purgeable objects and search again.
+*/
+
+   drm_mm_init_scan(dev_priv-mm.stolen, size, 4096, 0);
+   INIT_LIST_HEAD(unwind);
+
+   list_for_each_entry(obj, dev_priv-mm.unbound_list, global_list)
+   if (mark_free(obj, unwind))
+   goto found;
+
+   list_for_each_entry(obj, dev_priv-mm.bound_list, global_list)
+   if (mark_free(obj, unwind))
+   goto found;
+
+found:
+   INIT_LIST_HEAD(evict);
+   while (!list_empty(unwind)) {
+   obj = list_first_entry(unwind,
+  struct drm_i915_gem_object,
+  obj_exec_link);
+   list_del_init(obj-obj_exec_link);
+
+   if (drm_mm_scan_remove_block(obj-stolen)) {
+   list_add(obj-obj_exec_link, evict);
+   drm_gem_object_reference(obj-base);
+   }
}
 
+   ret = 0;
+   while (!list_empty(evict)) {
+   obj = list_first_entry(evict,
+  struct drm_i915_gem_object,
+  obj_exec_link);
+   list_del_init(obj-obj_exec_link);
+
+   if (ret == 0) {
+   struct i915_vma *vma, *vma_next;
+
+   list_for_each_entry_safe(vma, vma_next,
+obj-vma_list,
+vma_link)
+   if (i915_vma_unbind(vma))
+   break;
+
+   /* Stolen pins its pages to prevent the
+* normal shrinker from processing stolen
+* objects.
+*/
+   i915_gem_object_unpin_pages(obj);
+
+   ret = i915_gem_object_put_pages(obj);
+   if (ret == 0) {
+   obj-madv = __I915_MADV_PURGED;
+
+   kfree(obj-stolen);
+   obj-stolen = NULL;
+   } else
+   i915_gem_object_pin_pages(obj);
+   }
+
+