This hides the existing obj->dirty flag behind a set of trivial inline
accessors, so that non-GEM code can no longer poke at the flag
directly. The flag itself is renamed to __dirty to emphasise that it
is private to the GEM memory-management code and to ensure that no
legacy code continues to access it directly.
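
For reference, the call-site conversion is mechanical; all names
below are those introduced by this patch:

	/* before */
	obj->dirty = 1;			/* or = true */
	obj->dirty = 0;			/* or = false */
	if (obj->dirty) ...

	/* after */
	i915_gem_object_set_dirty(obj);
	i915_gem_object_clear_dirty(obj);
	if (i915_gem_object_is_dirty(obj)) ...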

v2:
Use Chris Wilson's preferred names for flag-related functions

v3:
Remove a couple of changes left over from a prototype version

Inspired-by: http://www.spinics.net/lists/intel-gfx/msg92390.html
Cc: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursu...@linux.intel.com>
Signed-off-by: Dave Gordon <david.s.gor...@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            | 22 +++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c            | 23 ++++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_context.c    |  6 ++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c    | 12 +++++++-----
 drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 29 ++++++++++++++++-------------
 8 files changed, 63 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 64702cc..8acf281 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -160,7 +160,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
                   i915_gem_active_get_seqno(&obj->last_write,
                                             &obj->base.dev->struct_mutex),
                   i915_cache_level_str(dev_priv, obj->cache_level),
-                  obj->dirty ? " dirty" : "",
+                  i915_gem_object_is_dirty(obj) ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e2dda8..3fed004 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2212,7 +2212,7 @@ struct drm_i915_gem_object {
         * This is set if the object has been written to since last bound
         * to the GTT
         */
-       unsigned int dirty:1;
+       unsigned int __dirty:1;
 
        /**
         * Advice: are the backing pages purgeable?
@@ -3159,6 +3159,26 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
        obj->pages_pin_count++;
 }
 
+/*
+ * Flag the object content as having changed since the last call to
+ * i915_gem_object_pin_pages() above, so that the new content is not
+ * lost after the next call to i915_gem_object_unpin_pages() below
+ */
+static inline void i915_gem_object_set_dirty(struct drm_i915_gem_object *obj)
+{
+       obj->__dirty = true;
+}
+
+static inline void i915_gem_object_clear_dirty(struct drm_i915_gem_object *obj)
+{
+       obj->__dirty = false;
+}
+
+static inline bool i915_gem_object_is_dirty(struct drm_i915_gem_object *obj)
+{
+       return obj->__dirty;
+}
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pages_pin_count == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c8bd022..08c8f6b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -234,9 +234,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
        }
 
        if (obj->madv == I915_MADV_DONTNEED)
-               obj->dirty = 0;
-
-       if (obj->dirty) {
+               i915_gem_object_clear_dirty(obj);
+       else if (i915_gem_object_is_dirty(obj)) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;
@@ -260,7 +259,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
-               obj->dirty = 0;
+               i915_gem_object_clear_dirty(obj);
        }
 
        sg_free_table(obj->pages);
@@ -704,7 +703,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                obj->cache_dirty = true;
 
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       obj->dirty = 1;
+       i915_gem_object_set_dirty(obj);
        /* return with the pages pinned */
        return 0;
 
@@ -1157,7 +1156,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                goto out_unpin;
 
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       obj->dirty = true;
+       i915_gem_object_set_dirty(obj);
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
@@ -2134,6 +2133,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
        struct sgt_iter sgt_iter;
        struct page *page;
+       bool dirty;
        int ret;
 
        BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2153,10 +2153,11 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
                i915_gem_object_save_bit_17_swizzle(obj);
 
        if (obj->madv == I915_MADV_DONTNEED)
-               obj->dirty = 0;
+               i915_gem_object_clear_dirty(obj);
 
+       dirty = i915_gem_object_is_dirty(obj);
        for_each_sgt_page(page, sgt_iter, obj->pages) {
-               if (obj->dirty)
+               if (dirty)
                        set_page_dirty(page);
 
                if (obj->madv == I915_MADV_WILLNEED)
@@ -2164,7 +2165,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 
                put_page(page);
        }
-       obj->dirty = 0;
+       i915_gem_object_clear_dirty(obj);
 
        sg_free_table(obj->pages);
        kfree(obj->pages);
@@ -3265,7 +3266,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        if (write) {
                obj->base.read_domains = I915_GEM_DOMAIN_GTT;
                obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-               obj->dirty = 1;
+               i915_gem_object_set_dirty(obj);
        }
 
        trace_i915_gem_object_change_domain(obj,
@@ -4743,7 +4744,7 @@ struct drm_i915_gem_object *
        i915_gem_object_pin_pages(obj);
        sg = obj->pages;
        bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-       obj->dirty = 1;         /* Backing store is now out of date */
+       i915_gem_object_set_dirty(obj); /* Backing store is now out of date */
        i915_gem_object_unpin_pages(obj);
 
        if (WARN_ON(bytes != size)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e9..4b9c9ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -818,6 +818,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
+               struct i915_vma *from_vma = from->engine[RCS].state;
+
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
@@ -825,9 +827,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
-               i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+               i915_vma_move_to_active(from_vma, req, 0);
                /* state is kept alive until the next request */
-               i915_vma_unpin(from->engine[RCS].state);
+               i915_vma_unpin(from_vma);
                i915_gem_context_put(from);
        }
        engine->last_context = i915_gem_context_get(to);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33c8522..239b430 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1290,7 +1290,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-       obj->dirty = 1; /* be paranoid  */
+       i915_gem_object_set_dirty(obj); /* be paranoid  */
 
        /* Add a reference if we're newly entering the active list.
         * The order in which we add operations to the retirement queue is
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930..1707aaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -674,23 +674,25 @@ struct get_pages_work {
 {
        struct sgt_iter sgt_iter;
        struct page *page;
+       bool dirty;
 
        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);
 
-       if (obj->madv != I915_MADV_WILLNEED)
-               obj->dirty = 0;
-
        i915_gem_gtt_finish_object(obj);
 
+       if (obj->madv != I915_MADV_WILLNEED)
+               i915_gem_object_clear_dirty(obj);
+
+       dirty = i915_gem_object_is_dirty(obj);
        for_each_sgt_page(page, sgt_iter, obj->pages) {
-               if (obj->dirty)
+               if (dirty)
                        set_page_dirty(page);
 
                mark_page_accessed(page);
                put_page(page);
        }
-       obj->dirty = 0;
+       i915_gem_object_clear_dirty(obj);
 
        sg_free_table(obj->pages);
        kfree(obj->pages);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15d..257dde1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -804,7 +804,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
        err->write_domain = obj->base.write_domain;
        err->fence_reg = vma->fence ? vma->fence->id : -1;
        err->tiling = i915_gem_object_get_tiling(obj);
-       err->dirty = obj->dirty;
+       err->dirty = i915_gem_object_is_dirty(obj);
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
        err->cache_level = obj->cache_level;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 16d7cdd..cb915af 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -707,6 +707,8 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine)
 {
        struct intel_context *ce = &ctx->engine[engine->id];
+       struct intel_ring *ring = ce->ring;
+       struct drm_i915_gem_object *ctx_obj;
        void *vaddr;
        u32 *lrc_reg_state;
        int ret;
@@ -721,24 +723,24 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
        if (ret)
                goto err;
 
-       vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+       ctx_obj = ce->state->obj;
+       vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto unpin_vma;
        }
 
-       lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-       ret = intel_ring_pin(ce->ring);
+       ret = intel_ring_pin(ring);
        if (ret)
                goto unpin_map;
 
-       intel_lr_context_descriptor_update(ctx, engine);
+       i915_gem_object_set_dirty(ctx_obj);
 
-       lrc_reg_state[CTX_RING_BUFFER_START+1] =
-               i915_ggtt_offset(ce->ring->vma);
+       lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+       lrc_reg_state[CTX_RING_BUFFER_START+1] = i915_ggtt_offset(ring->vma);
        ce->lrc_reg_state = lrc_reg_state;
-       ce->state->obj->dirty = true;
+
+       intel_lr_context_descriptor_update(ctx, engine);
 
        /* Invalidate GuC TLB. */
        if (i915.enable_guc_submission) {
@@ -1921,7 +1923,7 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
                DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                return ret;
        }
-       ctx_obj->dirty = true;
+       i915_gem_object_set_dirty(ctx_obj);
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
@@ -2134,23 +2136,24 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 
        for_each_engine(engine, dev_priv) {
                struct intel_context *ce = &ctx->engine[engine->id];
+               struct drm_i915_gem_object *ctx_obj;
                void *vaddr;
                uint32_t *reg_state;
 
                if (!ce->state)
                        continue;
 
-               vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+               ctx_obj = ce->state->obj;
+               vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
                if (WARN_ON(IS_ERR(vaddr)))
                        continue;
 
                reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               ce->state->obj->dirty = true;
-               i915_gem_object_unpin_map(ce->state->obj);
+               i915_gem_object_set_dirty(ctx_obj);
+               i915_gem_object_unpin_map(ctx_obj);
 
                ce->ring->head = 0;
                ce->ring->tail = 0;
-- 
1.9.1
