Now that we have a name for the previously anonymous per-engine structure
embedded inside the intel_context for the benefit of execlist mode, we
can optimise a few more places that access this array of structures.
This may improve the compiler's ability to avoid redundant dereference
and index operations.

Signed-off-by: Dave Gordon <[email protected]>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 13 +++++--------
 drivers/gpu/drm/i915/i915_guc_submission.c | 17 +++++++++--------
 2 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ccdca2c..ce3b5e9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1966,16 +1966,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
                if (i915.enable_execlists) {
                        seq_putc(m, '\n');
                        for_each_engine(engine, dev_priv, i) {
-                               struct drm_i915_gem_object *ctx_obj =
-                                       ctx->engine[i].state;
-                               struct intel_ringbuffer *ringbuf =
-                                       ctx->engine[i].ringbuf;
+                               struct intel_engine_ctx *ectx = &ctx->engine[i];
 
                                seq_printf(m, "%s: ", engine->name);
-                               if (ctx_obj)
-                                       describe_obj(m, ctx_obj);
-                               if (ringbuf)
-                                       describe_ctx_ringbuf(m, ringbuf);
+                               if (ectx->state)
+                                       describe_obj(m, ectx->state);
+                               if (ectx->ringbuf)
+                                       describe_ctx_ringbuf(m, ectx->ringbuf);
                                seq_putc(m, '\n');
                        }
                } else {
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index ae1f58d..7352023 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -392,6 +392,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 
        for_each_engine(engine, dev_priv, i) {
                struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
+               struct intel_engine_ctx *ectx = &ctx->engine[i];
                struct drm_i915_gem_object *obj;
                uint64_t ctx_desc;
 
@@ -402,7 +403,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                 * for now who owns a GuC client. But for future owner of GuC
                 * client, need to make sure lrc is pinned prior to enter here.
                 */
-               obj = ctx->engine[i].state;
+               obj = ectx->state;
                if (!obj)
                        break;  /* XXX: continue? */
 
@@ -415,7 +416,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-               obj = ctx->engine[i].ringbuf->obj;
+               obj = ectx->ringbuf->obj;
 
                lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
                lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
@@ -987,19 +988,19 @@ int intel_guc_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
-       struct intel_context *ctx;
+       struct intel_engine_ctx *ectx;
        u32 data[3];
 
        if (!i915.enable_guc_submission)
                return 0;
 
-       ctx = dev_priv->kernel_context;
+       ectx = &dev_priv->kernel_context->engine[RCS];
 
        data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
        /* any value greater than GUC_POWER_D0 */
        data[1] = GUC_POWER_D1;
        /* first page is shared data with GuC */
-       data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = i915_gem_obj_ggtt_offset(ectx->state);
 
        return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
@@ -1013,18 +1014,18 @@ int intel_guc_resume(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
-       struct intel_context *ctx;
+       struct intel_engine_ctx *ectx;
        u32 data[3];
 
        if (!i915.enable_guc_submission)
                return 0;
 
-       ctx = dev_priv->kernel_context;
+       ectx = &dev_priv->kernel_context->engine[RCS];
 
        data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
        data[1] = GUC_POWER_D0;
        /* first page is shared data with GuC */
-       data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = i915_gem_obj_ggtt_offset(ectx->state);
 
        return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to