Since we may have a VMA allocated for an object but have interrupted its
binding, there is a disparity between having elements on the obj->vma_list
and being bound. i915_gem_obj_bound_any() performs this check, but it is
not rigorously observed - add an explicit count to make it easier.

Signed-off-by: Chris Wilson <[email protected]>
---
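For reviewers: the invariant being introduced is that obj->bind_count
always matches the number of VMA on obj->vma_list whose drm_mm node is
allocated. As a minimal sketch (illustrative only, not part of the
patch; the helper names below are made up), the old list walk versus
its O(1) replacement:

	/* What i915_gem_obj_bound_any() used to compute by walking the list */
	static bool obj_bound_any__list_walk(struct drm_i915_gem_object *obj)
	{
		struct i915_vma *vma;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (drm_mm_node_allocated(&vma->node))
				return true;

		return false;
	}

	/* The same question answered by the explicit counter */
	static bool obj_bound_any__counter(struct drm_i915_gem_object *obj)
	{
		return obj->bind_count != 0;
	}

Note that the shrinker's num_vma_bound() also counted vma->pin_count,
so that can_release_pages() would refuse objects pinned for display;
with the plain bind_count comparison, that accounting moves to an
explicit pages_pin_count reference taken in pin_to_display_plane and
dropped in unpin_from_display_plane.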
 drivers/gpu/drm/i915/i915_debugfs.c      | 12 +++++------
 drivers/gpu/drm/i915/i915_drv.h          |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c          | 34 +++++++++++++-------------------
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 17 +---------------
 drivers/gpu/drm/i915/i915_gem_stolen.c   |  1 +
 5 files changed, 23 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d1f470567b0..e2b1242e369b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -164,6 +164,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!drm_mm_node_allocated(&vma->node))
+                       continue;
+
                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
                           vma->is_ggtt ? "g" : "pp",
                           vma->node.start, vma->node.size);
@@ -331,11 +334,11 @@ static int per_file_stats(int id, void *ptr, void *data)
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;
-       int bound = 0;
 
        stats->count++;
        stats->total += obj->base.size;
-
+       if (!obj->bind_count)
+               stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;
 
@@ -343,8 +346,6 @@ static int per_file_stats(int id, void *ptr, void *data)
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
-               bound++;
-
                if (vma->is_ggtt) {
                        stats->global += vma->node.size;
                } else {
@@ -362,9 +363,6 @@ static int per_file_stats(int id, void *ptr, void *data)
                        stats->inactive += vma->node.size;
        }
 
-       if (!bound)
-               stats->unbound += obj->base.size;
-
        return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aa9d3782107e..8f5cf244094e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2088,6 +2088,8 @@ struct drm_i915_gem_object {
 
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
+       /** Count of VMA actually bound by this object */
+       unsigned int bind_count;
        unsigned int pin_display;
 
        struct sg_table *pages;
@@ -2874,7 +2876,6 @@ i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
        return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
 }
 
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view);
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 164ebdaa0369..ed3f306af42f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1812,7 +1812,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages_pin_count)
                return -EBUSY;
 
-       BUG_ON(i915_gem_obj_bound_any(obj));
+       BUG_ON(obj->bind_count);
 
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
@@ -2558,7 +2558,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
 
        if (list_empty(&vma->obj_link))
@@ -2572,7 +2571,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
        if (vma->pin_count)
                return -EBUSY;
 
-       BUG_ON(obj->pages == NULL);
+       GEM_BUG_ON(obj->bind_count == 0);
+       GEM_BUG_ON(obj->pages == NULL);
 
        if (wait) {
                ret = i915_gem_object_wait_rendering(obj, false);
@@ -2610,8 +2610,9 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
-       if (list_empty(&obj->vma_list))
-               list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+       if (--obj->bind_count == 0)
+               list_move_tail(&obj->global_list,
+                              &to_i915(obj->base.dev)->mm.unbound_list);
 
        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
@@ -2849,6 +2850,7 @@ search_free:
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->vm_link, &vm->inactive_list);
+       obj->bind_count++;
 
        return vma;
 
@@ -3037,7 +3039,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct i915_vma *vma, *next;
-       bool bound = false;
        int ret = 0;
 
        if (obj->cache_level == cache_level)
@@ -3061,8 +3062,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
-               } else
-                       bound = true;
+               }
        }
 
        /* We can reuse the existing drm_mm nodes but need to change the
@@ -3072,7 +3072,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
         * rewrite the PTE in the belief that doing so tramples upon less
         * state and so involves less work.
         */
-       if (bound) {
+       if (obj->bind_count) {
                /* Before we change the PTE, the GPU must not be accessing it.
                 * If we wait upon the object, we know that all the bound
                 * VMA are no longer active.
@@ -3281,6 +3281,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                            old_read_domains,
                                            old_write_domain);
 
+       /* Increment the pages_pin_count to guard against the shrinker */
+       obj->pages_pin_count++;
+
        return 0;
 
 err_unpin_display:
@@ -3297,6 +3300,7 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 
        i915_gem_object_ggtt_unpin_view(obj, view);
 
+       obj->pages_pin_count--;
        obj->pin_display--;
 }
 
@@ -3797,6 +3801,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                        dev_priv->mm.interruptible = was_interruptible;
                }
        }
+       GEM_BUG_ON(obj->bind_count);
 
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
         * before progressing. */
@@ -4485,17 +4490,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
        return false;
 }
 
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
-{
-       struct i915_vma *vma;
-
-       list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (drm_mm_node_allocated(&vma->node))
-                       return true;
-
-       return false;
-}
-
 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
                                struct i915_address_space *vm)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 777959b47ccf..fa190ef3f727 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -47,21 +47,6 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
-       struct i915_vma *vma;
-       int count = 0;
-
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (drm_mm_node_allocated(&vma->node))
-                       count++;
-               if (vma->pin_count)
-                       count++;
-       }
-
-       return count;
-}
-
 static bool swap_available(void)
 {
        return get_nr_swap_pages() > 0;
@@ -77,7 +62,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
-       if (obj->pages_pin_count != num_vma_bound(obj))
+       if (obj->pages_pin_count != obj->bind_count)
                return false;
 
        /* We can only return physical pages to the system if we can either
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 463be259a505..1c81a1470baf 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -693,6 +693,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        vma->bound |= GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
        list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+       obj->bind_count++;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        i915_gem_object_pin_pages(obj);
-- 
2.7.0.rc3
