The shrinker may appear to recurse into obj->mm.lock, as the shrinker may
be called from a direct reclaim path whilst handling get_pages. We
filter out recursing on the same obj->mm.lock by inspecting
obj->mm.pages, but we do want to take the lock on a second object in
order to reap its pages. lockdep spots the recursion on the same
lockclass and needs an annotation to avoid a false positive. To keep the
two paths distinct, create an enum to indicate which subclass of
obj->mm.lock is being taken. This removes the false positive and avoids
masking real bugs.
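
Roughly how the subclass annotation plays out, as a self-contained
sketch (example_put_pages() below is illustrative only and not part of
this patch):

	#include <linux/mutex.h>

	/* lockdep subclasses for obj->mm.lock, mirroring the enum added below */
	enum i915_mm_subclass {
		I915_MM_NORMAL = 0,	/* ordinary put_pages callers */
		I915_MM_SHRINKER	/* reclaim reaping another object's pages */
	};

	static void example_put_pages(struct mutex *mm_lock,
				      enum i915_mm_subclass subclass)
	{
		/*
		 * lockdep tracks each subclass of a lockclass separately, so
		 * taking obj->mm.lock as I915_MM_SHRINKER while another
		 * object's lock is held as I915_MM_NORMAL is not reported
		 * as self-recursion.
		 */
		mutex_lock_nested(mm_lock, subclass);
		/* ... drop the object's pages ... */
		mutex_unlock(mm_lock);
	}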

Suggested-by: Joonas Lahtinen <[email protected]>
Signed-off-by: Chris Wilson <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
---
 drivers/gpu/drm/i915/i915_drv.h          | 7 ++++++-
 drivers/gpu/drm/i915/i915_gem.c          | 9 +++++----
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 4 ++--
 drivers/gpu/drm/i915/i915_gem_userptr.c  | 2 +-
 4 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9143129cd851..b93a37f5f4ed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3287,7 +3287,12 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
        __i915_gem_object_unpin_pages(obj);
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+       I915_MM_NORMAL = 0,
+       I915_MM_SHRINKER
+};
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+                                enum i915_mm_subclass subclass);
 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 
 enum i915_map_type {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1568f6756430..cbbfaa7761b9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -491,7 +491,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       __i915_gem_object_put_pages(obj);
+       __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
        if (obj->mm.pages)
                return -EBUSY;
 
@@ -2181,7 +2181,8 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+                                enum i915_mm_subclass subclass)
 {
        struct sg_table *pages;
 
@@ -2193,7 +2194,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
                return;
 
        /* May be called by shrinker from within get_pages() (on another bo) */
-       mutex_lock_nested(&obj->mm.lock, SINGLE_DEPTH_NESTING);
+       mutex_lock_nested(&obj->mm.lock, subclass);
        if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
                goto unlock;
 
@@ -4283,7 +4284,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
                if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
                        atomic_set(&obj->mm.pages_pin_count, 0);
-               __i915_gem_object_put_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                GEM_BUG_ON(obj->mm.pages);
 
                if (obj->base.import_attach)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 0993afc0e725..f988652f1e26 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -111,7 +111,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
        if (i915_gem_object_unbind(obj) == 0)
-               __i915_gem_object_put_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
        return !READ_ONCE(obj->mm.pages);
 }
 
@@ -225,7 +225,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                        if (unsafe_drop_pages(obj)) {
                                /* May arrive from get_pages on another bo */
                                mutex_lock_nested(&obj->mm.lock,
-                                                 SINGLE_DEPTH_NESTING);
+                                                 I915_MM_SHRINKER);
                                if (!obj->mm.pages) {
                                        __i915_gem_object_invalidate(obj);
                                        list_del_init(&obj->global_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c30d04f64670..9bf44b5bca10 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -75,7 +75,7 @@ static void cancel_userptr(struct work_struct *work)
 
        /* We are inside a kthread context and can't be interrupted */
        if (i915_gem_object_unbind(obj) == 0)
-               __i915_gem_object_put_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
        WARN_ONCE(obj->mm.pages,
                  "Failed to release pages: bind_count=%d, pages_pin_count=%d, 
pin_display=%d\n",
                  obj->bind_count,
-- 
2.10.2
