Support basic eviction for regions.

Track objects against their memory region, and keep a separate
per-region purgeable list for objects that userspace has marked as
I915_MADV_DONTNEED. If a buddy allocation from a region fails, attempt
to make room by purging objects on that region's purgeable list, then
retry the allocation once before giving up.

Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janul...@linux.intel.com>
---
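For context (not part of this patch): userspace already flips the
purgeable state through the existing madvise ioctl, and with this
change an object marked I915_MADV_DONTNEED also becomes a candidate
for eviction from its memory region. A minimal userspace sketch of
that flow (the mark_dontneed() helper is purely illustrative), assuming
a DRM fd and GEM handle are already set up:

  #include <errno.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>

  /*
   * Mark a buffer as purgeable: the kernel is then free to reclaim its
   * backing store (and, with this patch, to evict it from its memory
   * region) when space runs short.
   */
  static int mark_dontneed(int drm_fd, uint32_t handle)
  {
          struct drm_i915_gem_madvise madv = {
                  .handle = handle,
                  .madv = I915_MADV_DONTNEED,
          };

          if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
                  return -errno;

          /* retained == 0 means the pages were already purged */
          return madv.retained;
  }
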
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  7 ++
 drivers/gpu/drm/i915/gem/i915_gem_region.c    | 11 +++
 drivers/gpu/drm/i915/gem/i915_gem_region.h    |  1 +
 drivers/gpu/drm/i915/i915_gem.c               | 17 +++++
 drivers/gpu/drm/i915/intel_memory_region.c    | 73 +++++++++++++++++-
 drivers/gpu/drm/i915/intel_memory_region.h    |  5 ++
 .../drm/i915/selftests/intel_memory_region.c  | 76 +++++++++++++++++++
 drivers/gpu/drm/i915/selftests/mock_region.c  |  1 +
 8 files changed, 187 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a32066e66271..5e2fa37e9bc0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -168,6 +168,13 @@ struct drm_i915_gem_object {
                 * List of memory region blocks allocated for this object.
                 */
                struct list_head blocks;
+               /**
+                * Element within memory_region->objects or
+                * memory_region->purgeable if the object is marked as
+                * DONTNEED. Access is protected by memory_region->obj_lock.
+                */
+               struct list_head region_link;
+               struct list_head tmp_link;
 
                struct sg_table *pages;
                void *mapping;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 3cd1bf15e25b..be126e70c90f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -102,6 +102,17 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
 {
        INIT_LIST_HEAD(&obj->mm.blocks);
        obj->mm.region = mem;
+
+       mutex_lock(&mem->obj_lock);
+       list_add(&obj->mm.region_link, &mem->objects);
+       mutex_unlock(&mem->obj_lock);
+}
+
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+       mutex_lock(&obj->mm.region->obj_lock);
+       list_del(&obj->mm.region_link);
+       mutex_unlock(&obj->mm.region->obj_lock);
 }
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index da5a2ca1a0fb..ebddc86d78f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -18,6 +18,7 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
                                        struct intel_memory_region *mem);
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6ff01a404346..8735dea74809 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1105,6 +1105,23 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
            !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);
 
+       if (obj->mm.region) {
+               mutex_lock(&obj->mm.region->obj_lock);
+
+               switch (obj->mm.madv) {
+               case I915_MADV_WILLNEED:
+                       list_move(&obj->mm.region_link,
+                                 &obj->mm.region->objects);
+                       break;
+               default:
+                       list_move(&obj->mm.region_link,
+                                 &obj->mm.region->purgeable);
+                       break;
+               }
+
+               mutex_unlock(&obj->mm.region->obj_lock);
+       }
+
        args->retained = obj->mm.madv != __I915_MADV_PURGED;
        mutex_unlock(&obj->mm.lock);
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index ef12e462acb8..3a3caaadea1f 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -12,6 +12,51 @@ const u32 intel_region_map[] = {
        [INTEL_MEMORY_STOLEN] = BIT(INTEL_STOLEN + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
 };
 
+static int
+intel_memory_region_evict(struct intel_memory_region *mem,
+                         resource_size_t target,
+                         unsigned int flags)
+{
+       struct drm_i915_gem_object *obj;
+       resource_size_t found;
+       int err;
+
+       err = 0;
+       found = 0;
+
+       mutex_lock(&mem->obj_lock);
+       list_for_each_entry(obj, &mem->purgeable, mm.region_link) {
+               if (!i915_gem_object_has_pages(obj))
+                       continue;
+
+               if (READ_ONCE(obj->pin_global))
+                       continue;
+
+               if (atomic_read(&obj->bind_count))
+                       continue;
+
+               mutex_unlock(&mem->obj_lock);
+
+               __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+
+               mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER);
+               if (!i915_gem_object_has_pages(obj)) {
+                       obj->mm.madv = __I915_MADV_PURGED;
+                       found += obj->base.size;
+               }
+               mutex_unlock(&obj->mm.lock);
+
+               if (found >= target)
+                       return 0;
+
+               mutex_lock(&mem->obj_lock);
+       }
+
+       err = -ENOSPC;
+       mutex_unlock(&mem->obj_lock);
+       return err;
+}
+
 static u64
 intel_memory_region_free_pages(struct intel_memory_region *mem,
                               struct list_head *blocks)
@@ -63,7 +108,8 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
        do {
                struct i915_buddy_block *block;
                unsigned int order;
-
+               bool retry = true;
+retry:
                order = fls(n_pages) - 1;
                GEM_BUG_ON(order > mem->mm.max_order);
 
@@ -72,9 +118,24 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
                        if (!IS_ERR(block))
                                break;
 
-                       /* XXX: some kind of eviction pass, local to the device */
-                       if (flags & I915_ALLOC_CONTIGUOUS || !order--)
-                               goto err_free_blocks;
+                       if (flags & I915_ALLOC_CONTIGUOUS || !order--) {
+                               resource_size_t target;
+                               int err;
+
+                               if (!retry)
+                                       goto err_free_blocks;
+
+                               target = n_pages * mem->mm.chunk_size;
+
+                               mutex_unlock(&mem->mm_lock);
+                               err = intel_memory_region_evict(mem, target, 0);
+                               mutex_lock(&mem->mm_lock);
+                               if (err)
+                                       goto err_free_blocks;
+
+                               retry = false;
+                               goto retry;
+                       }
                } while (1);
 
                n_pages -= BIT(order);
@@ -147,6 +208,10 @@ intel_memory_region_create(struct drm_i915_private *i915,
        mem->min_page_size = min_page_size;
        mem->ops = ops;
 
+       mutex_init(&mem->obj_lock);
+       INIT_LIST_HEAD(&mem->objects);
+       INIT_LIST_HEAD(&mem->purgeable);
+
        mutex_init(&mem->mm_lock);
 
        if (ops->init) {
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index d299fed169e9..340411dcf86b 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -78,6 +78,11 @@ struct intel_memory_region {
        unsigned int type;
        unsigned int instance;
        unsigned int id;
+
+       /* Protects access to objects and purgeable */
+       struct mutex obj_lock;
+       struct list_head objects;
+       struct list_head purgeable;
 };
 
 int intel_memory_region_init_buddy(struct intel_memory_region *mem);
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 84daa92bf92c..2f13e4c1d999 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -79,10 +79,86 @@ static int igt_mock_fill(void *arg)
        return err;
 }
 
+static void igt_mark_evictable(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_unpin_pages(obj);
+       obj->mm.madv = I915_MADV_DONTNEED;
+       list_move(&obj->mm.region_link, &obj->mm.region->purgeable);
+}
+
+static int igt_mock_shrink(void *arg)
+{
+       struct intel_memory_region *mem = arg;
+       struct drm_i915_gem_object *obj;
+       unsigned long n_objects;
+       LIST_HEAD(objects);
+       resource_size_t target;
+       resource_size_t total;
+       int err = 0;
+
+       target = mem->mm.chunk_size;
+       total = resource_size(&mem->region);
+       n_objects = total / target;
+
+       while (n_objects--) {
+               obj = i915_gem_object_create_region(mem,
+                                                   target,
+                                                   0);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       goto err_close_objects;
+               }
+
+               list_add(&obj->st_link, &objects);
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto err_close_objects;
+
+               /*
+                * Make half of the region evictable, though do so in a
+                * horribly fragmented fashion.
+                */
+               if (n_objects % 2)
+                       igt_mark_evictable(obj);
+       }
+
+       while (target <= total / 2) {
+               obj = i915_gem_object_create_region(mem, target, 0);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       goto err_close_objects;
+               }
+
+               list_add(&obj->st_link, &objects);
+
+               /* Provoke the shrinker to start violently swinging its axe! */
+               err = i915_gem_object_pin_pages(obj);
+               if (err) {
+                       pr_err("failed to shrink for target=%pa\n", &target);
+                       goto err_close_objects;
+               }
+
+               /* Again, half of the region should remain evictable */
+               igt_mark_evictable(obj);
+
+               target <<= 1;
+       }
+
+err_close_objects:
+       close_objects(&objects);
+
+       if (err == -ENOMEM)
+               err = 0;
+
+       return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_mock_fill),
+               SUBTEST(igt_mock_shrink),
        };
        struct intel_memory_region *mem;
        struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index fbde890385c7..cc97250dca62 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -11,6 +11,7 @@
 static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
        .get_pages = i915_gem_object_get_pages_buddy,
        .put_pages = i915_gem_object_put_pages_buddy,
+       .release = i915_gem_object_release_memory_region,
 };
 
 static struct drm_i915_gem_object *
-- 
2.20.1
