This just hides the existing obj->dirty flag inside a trivial inline
setter, to discourage non-GEM code from looking too closely.

Existing code that sets obj->dirty is then changed to use the function
instead.

Inspired-by: http://www.spinics.net/lists/intel-gfx/msg92390.html
Cc: Chris Wilson <[email protected]>
Signed-off-by: Dave Gordon <[email protected]>
---
 drivers/gpu/drm/i915/i915_drv.h            | 11 +++++++++++
 drivers/gpu/drm/i915/i915_gem.c            |  6 +++---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |  6 +++---
 5 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 32f0597..dfa45ef 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3004,6 +3004,17 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
        obj->pages_pin_count++;
 }
 
+/*
+ * Flag the object content as having changed since the last call to
+ * i915_gem_object_pin_pages() above, so that the new content is not
+ * lost after the next call to i915_gem_object_unpin_pages() below
+ */
+static inline void i915_gem_object_mark_dirty(struct drm_i915_gem_object *obj)
+{
+       WARN_ON(obj->pages_pin_count == 0);
+       obj->dirty = true;
+}
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pages_pin_count == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d493e79..3fd8cb4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -931,9 +931,9 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
        i915_gem_object_pin_pages(obj);
+       i915_gem_object_mark_dirty(obj);
 
        offset = args->offset;
-       obj->dirty = 1;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
@@ -3791,7 +3791,7 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
        if (write) {
                obj->base.read_domains = I915_GEM_DOMAIN_GTT;
                obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-               obj->dirty = 1;
+               i915_gem_object_mark_dirty(obj);
        }
 
        trace_i915_gem_object_change_domain(obj,
@@ -5362,7 +5362,7 @@ struct drm_i915_gem_object *
        i915_gem_object_pin_pages(obj);
        sg = obj->pages;
        bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-       obj->dirty = 1;         /* Backing store is now out of date */
+       i915_gem_object_mark_dirty(obj); /* Backing store is out of date */
        i915_gem_object_unpin_pages(obj);
 
        if (WARN_ON(bytes != size)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4e12bae..69b8391 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -747,7 +747,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
-               from->legacy_hw_ctx.rcs_state->dirty = 1;
+               i915_gem_object_mark_dirty(from->legacy_hw_ctx.rcs_state);
 
                /* obj is kept alive until the next request by its active ref */
                i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6f4f2a6..584b329 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1109,7 +1109,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
 
-               obj->dirty = 1; /* be paranoid  */
+               i915_gem_object_mark_dirty(obj); /* be paranoid  */
                obj->base.write_domain = obj->base.pending_write_domain;
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2b7e6bb..15d341e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2451,7 +2451,7 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
                DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                return ret;
        }
-       ctx_obj->dirty = true;
+       i915_gem_object_mark_dirty(ctx_obj);
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
@@ -2735,9 +2735,9 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
                if (WARN_ON(IS_ERR(vaddr)))
                        continue;
 
-               reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-               ctx_obj->dirty = true;
+               i915_gem_object_mark_dirty(ctx_obj);
 
+               reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to