Hey Chris,

From the i915/perf point of view, I'm fine with this change.
The pinning of the hw_id when monitoring a single context (with OA) doesn't break the existing userspace (I can only think of Mesa).


I'm also trying to build up a system wide monitoring feature in GPUTop with a timeline display. This change makes it a bit more challenging.
But this isn't really an expected feature, it's just nice to have.

What I'm thinking of would be to keep a circular buffer of requests in the order they're submitted to an engine. Then the i915 perf driver could correlate between the context-switch tagged reports coming from OA and the requests submitted. Much like the OA buffer, this circular buffer could overflow at which point we signal the application using the i915 perf driver and it'll most likely close the driver and try again.

I would need to have the hw_id added to the requests. Does that sound possible?

Thanks,

-
Lionel

On 09/02/18 10:22, Chris Wilson wrote:
Future gen reduce the number of bits we will have available to
differentiate between contexts, so reduce the lifetime of the ID
assignment from that of the context to its current active cycle (i.e.
only while it is pinned for use by the HW, will it have a constant ID).
This means that instead of a max of 2k allocated contexts (worst case
before fun with bit twiddling), we instead have a limit of 2k in flight
contexts (minus a few that have been pinned by the kernel or by perf).

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwer...@intel.com>
Cc: Oscar Mateo <oscar.ma...@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Cc: Mika Kuoppala <mika.kuopp...@intel.com>
Cc: Michel Thierry <michel.thie...@intel.com>
---
  drivers/gpu/drm/i915/i915_debugfs.c              |   5 +-
  drivers/gpu/drm/i915/i915_drv.h                  |   1 +
  drivers/gpu/drm/i915/i915_gem_context.c          | 185 +++++++++++++++++------
  drivers/gpu/drm/i915/i915_gem_context.h          |   5 +
  drivers/gpu/drm/i915/i915_perf.c                 |  48 +++---
  drivers/gpu/drm/i915/i915_request.c              |   6 +-
  drivers/gpu/drm/i915/i915_request.h              |   2 +-
  drivers/gpu/drm/i915/i915_trace.h                |   2 +-
  drivers/gpu/drm/i915/intel_lrc.c                 |   7 +
  drivers/gpu/drm/i915/selftests/mock_context.c    |  11 +-
  drivers/gpu/drm/i915/selftests/mock_gem_device.c |  18 +--
  11 files changed, 191 insertions(+), 99 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c 
b/drivers/gpu/drm/i915/i915_debugfs.c
index e7525920f768..0710366c73fa 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1897,7 +1897,10 @@ static int i915_context_status(struct seq_file *m, void 
*unused)
                return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-               seq_printf(m, "HW context %u ", ctx->hw_id);
+               seq_puts(m, "HW context ");
+               if (!list_empty(&ctx->hw_id_link))
+                       seq_printf(m, "%x [pin %u]",
+                                  ctx->hw_id, ctx->pin_hw_id);
                if (ctx->pid) {
                        struct task_struct *task;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1af02e6e841..56efe2982cb9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2093,6 +2093,7 @@ struct drm_i915_private {
                 */
                struct ida hw_ida;
  #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+               struct list_head hw_id_list;
        } contexts;
u32 fdi_rx_config;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c 
b/drivers/gpu/drm/i915/i915_gem_context.c
index a73340ae9419..ee82654cf20e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,11 +141,15 @@ static void i915_gem_context_free(struct i915_gem_context 
*ctx)
list_del(&ctx->link); - ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
+       if (!list_empty(&ctx->hw_id_link)) {
+               ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
+               list_del(&ctx->hw_id_link);
+       }
+
        kfree_rcu(ctx, rcu);
  }
-static void contexts_free(struct drm_i915_private *i915)
+static bool contexts_free(struct drm_i915_private *i915)
  {
        struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
        struct i915_gem_context *ctx, *cn;
@@ -154,6 +158,8 @@ static void contexts_free(struct drm_i915_private *i915)
llist_for_each_entry_safe(ctx, cn, freed, free_link)
                i915_gem_context_free(ctx);
+
+       return freed;
  }
static void contexts_free_first(struct drm_i915_private *i915)
@@ -208,28 +214,6 @@ static void context_close(struct i915_gem_context *ctx)
        i915_gem_context_put(ctx);
  }
-static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
-{
-       int ret;
-
-       ret = ida_simple_get(&dev_priv->contexts.hw_ida,
-                            0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
-       if (ret < 0) {
-               /* Contexts are only released when no longer active.
-                * Flush any pending retires to hopefully release some
-                * stale contexts and try again.
-                */
-               i915_retire_requests(dev_priv);
-               ret = ida_simple_get(&dev_priv->contexts.hw_ida,
-                                    0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
-               if (ret < 0)
-                       return ret;
-       }
-
-       *out = ret;
-       return 0;
-}
-
  static u32 default_desc_template(const struct drm_i915_private *i915,
                                 const struct i915_hw_ppgtt *ppgtt)
  {
@@ -265,12 +249,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);
- ret = assign_hw_id(dev_priv, &ctx->hw_id);
-       if (ret) {
-               kfree(ctx);
-               return ERR_PTR(ret);
-       }
-
        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->contexts.list);
        ctx->i915 = dev_priv;
@@ -278,6 +256,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
+       INIT_LIST_HEAD(&ctx->hw_id_link);
/* Default context will never have a file_priv */
        ret = DEFAULT_CONTEXT_HANDLE;
@@ -413,15 +392,35 @@ i915_gem_context_create_gvt(struct drm_device *dev)
        return ctx;
  }
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+       struct i915_gem_context *ctx;
+
+       /* Keep the context ref so that we can free it immediately ourselves */
+       ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+       context_close(ctx);
+       i915_gem_context_free(ctx);
+}
+
  struct i915_gem_context *
  i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
  {
        struct i915_gem_context *ctx;
+       int err;
ctx = i915_gem_create_context(i915, NULL);
        if (IS_ERR(ctx))
                return ctx;
+ err = i915_gem_context_pin_hw_id(ctx);
+       if (err) {
+               destroy_kernel_context(&ctx);
+               return ERR_PTR(err);
+       }
+
        i915_gem_context_clear_bannable(ctx);
        ctx->priority = prio;
        ctx->ring_size = PAGE_SIZE;
@@ -431,17 +430,17 @@ i915_gem_context_create_kernel(struct drm_i915_private 
*i915, int prio)
        return ctx;
  }
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
+static void init_contexts(struct drm_i915_private *i915)
  {
-       struct i915_gem_context *ctx;
+       INIT_LIST_HEAD(&i915->contexts.list);
- /* Keep the context ref so that we can free it immediately ourselves */
-       ctx = i915_gem_context_get(fetch_and_zero(ctxp));
-       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+       /* Using the simple ida interface, the max is limited by sizeof(int) */
+       BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+       ida_init(&i915->contexts.hw_ida);
+       INIT_LIST_HEAD(&i915->contexts.hw_id_list);
- context_close(ctx);
-       i915_gem_context_free(ctx);
+       INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
+       init_llist_head(&i915->contexts.free_list);
  }
static bool needs_preempt_context(struct drm_i915_private *i915)
@@ -457,13 +456,7 @@ int i915_gem_contexts_init(struct drm_i915_private 
*dev_priv)
        GEM_BUG_ON(dev_priv->kernel_context);
        GEM_BUG_ON(dev_priv->preempt_context);
- INIT_LIST_HEAD(&dev_priv->contexts.list);
-       INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
-       init_llist_head(&dev_priv->contexts.free_list);
-
-       /* Using the simple ida interface, the max is limited by sizeof(int) */
-       BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
-       ida_init(&dev_priv->contexts.hw_ida);
+       init_contexts(dev_priv);
/* lowest priority; idle task */
        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
@@ -476,6 +469,7 @@ int i915_gem_contexts_init(struct drm_i915_private 
*dev_priv)
         * all user contexts will have non-zero hw_id.
         */
        GEM_BUG_ON(ctx->hw_id);
+       GEM_BUG_ON(!ctx->pin_hw_id);
        dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
@@ -521,6 +515,7 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
        destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
+       GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
        ida_destroy(&i915->contexts.hw_ida);
  }
@@ -865,6 +860,106 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
        return ret;
  }
+static inline int new_hw_id(struct drm_i915_private *i915)
+{
+       return ida_simple_get(&i915->contexts.hw_ida,
+                             0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+}
+
+static int steal_hw_id(struct drm_i915_private *i915)
+{
+       struct i915_gem_context *ctx, *cn;
+       LIST_HEAD(pinned);
+       int id = 0;
+
+       list_for_each_entry_safe(ctx, cn,
+                                &i915->contexts.hw_id_list, hw_id_link) {
+               if (ctx->pin_hw_id) {
+                       list_move_tail(&ctx->hw_id_link, &pinned);
+                       continue;
+               }
+
+               GEM_BUG_ON(!ctx->hw_id);
+               list_del_init(&ctx->hw_id_link);
+               id = ctx->hw_id;
+               break;
+       }
+
+       list_splice_tail(&pinned, &i915->contexts.hw_id_list);
+       return id;
+}
+
+static int assign_hw_id(struct drm_i915_private *i915, unsigned *out)
+{
+       int ret;
+
+       ret = new_hw_id(i915);
+       if (unlikely(ret < 0)) {
+               ret = steal_hw_id(i915);
+               if (ret)
+                       goto out;
+
+               /*
+                * Contexts are only released when no longer active.
+                * Flush any pending retires to hopefully release some
+                * stale contexts and try again.
+                */
+               if (i915_retire_requests(i915)) {
+                       ret = steal_hw_id(i915);
+                       if (ret)
+                               goto out;
+
+                       ret = i915_gem_wait_for_idle(i915,
+                                                    I915_WAIT_INTERRUPTIBLE |
+                                                    I915_WAIT_LOCKED);
+                       if (ret)
+                               return ret;
+
+                       ret = steal_hw_id(i915);
+                       if (ret)
+                               goto out;
+               }
+
+               /* One last attempt, to determine the errno */
+               ret = -ENOSPC;
+               if (contexts_free(i915)) {
+                       ret = new_hw_id(i915);
+                       GEM_BUG_ON(ret == 0);
+               }
+       }
+
+out:
+       *out = ret;
+       return 0;
+}
+
+int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+{
+       int err;
+
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       GEM_BUG_ON(ctx->pin_hw_id == ~0u);
+       if (!ctx->pin_hw_id++ && list_empty(&ctx->hw_id_link)) {
+               err = assign_hw_id(ctx->i915, &ctx->hw_id);
+               if (err)
+                       return err;
+
+               list_add_tail(&ctx->hw_id_link,
+                             &ctx->i915->contexts.hw_id_list);
+       }
+
+       return 0;
+}
+
+void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
+{
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       GEM_BUG_ON(ctx->pin_hw_id == 0u);
+       --ctx->pin_hw_id;
+}
+
  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
  #include "selftests/mock_context.c"
  #include "selftests/i915_gem_context.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h 
b/drivers/gpu/drm/i915/i915_gem_context.h
index 2ca7d0289abb..d4a9a9ccf0b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -125,6 +125,8 @@ struct i915_gem_context {
         * id for the lifetime of the context.
         */
        unsigned int hw_id;
+       unsigned int pin_hw_id;
+       struct list_head hw_id_link;
/**
         * @user_handle: userspace identifier
@@ -254,6 +256,9 @@ static inline void 
i915_gem_context_set_force_single_submission(struct i915_gem_
        __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
  }
+int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
+void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx);
+
  static inline bool i915_gem_context_is_default(const struct i915_gem_context 
*c)
  {
        return c->user_handle == DEFAULT_CONTEXT_HANDLE;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f464c3737228..db98239010f9 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1215,30 +1215,22 @@ static int i915_oa_read(struct i915_perf_stream *stream,
  static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
  {
        struct drm_i915_private *dev_priv = stream->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[RCS];
+       struct intel_ring *ring;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+       if (ret)
+               return ret;
+
+       ring = engine->context_pin(engine, stream->ctx);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       if (IS_ERR(ring))
+               return PTR_ERR(ring);
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
                dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
        } else {
-               struct intel_engine_cs *engine = dev_priv->engine[RCS];
-               struct intel_ring *ring;
-               int ret;
-
-               ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-               if (ret)
-                       return ret;
-
-               /*
-                * As the ID is the gtt offset of the context's vma we
-                * pin the vma to ensure the ID remains fixed.
-                *
-                * NB: implied RCS engine...
-                */
-               ring = engine->context_pin(engine, stream->ctx);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-               if (IS_ERR(ring))
-                       return PTR_ERR(ring);
-
-
                /*
                 * Explicitly track the ID (instead of calling
                 * i915_ggtt_offset() on the fly) considering the difference
@@ -1261,19 +1253,13 @@ static int oa_get_render_ctx_id(struct i915_perf_stream 
*stream)
  static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
  {
        struct drm_i915_private *dev_priv = stream->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[RCS];
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-               dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-       } else {
-               struct intel_engine_cs *engine = dev_priv->engine[RCS];
-
-               mutex_lock(&dev_priv->drm.struct_mutex);
-
-               dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-               engine->context_unpin(engine, stream->ctx);
+       dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- mutex_unlock(&dev_priv->drm.struct_mutex);
-       }
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       engine->context_unpin(engine, stream->ctx);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
  }
static void
diff --git a/drivers/gpu/drm/i915/i915_request.c 
b/drivers/gpu/drm/i915/i915_request.c
index ba52b3060f88..559bc1914ca5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1391,7 +1391,7 @@ static void engine_retire_requests(struct intel_engine_cs 
*engine)
                i915_request_retire(request);
  }
-void i915_retire_requests(struct drm_i915_private *i915)
+bool i915_retire_requests(struct drm_i915_private *i915)
  {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -1399,10 +1399,12 @@ void i915_retire_requests(struct drm_i915_private *i915)
        lockdep_assert_held(&i915->drm.struct_mutex);
if (!i915->gt.active_requests)
-               return;
+               return false;
for_each_engine(engine, i915, id)
                engine_retire_requests(engine);
+
+       return true;
  }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h 
b/drivers/gpu/drm/i915/i915_request.h
index 74311fc53e2f..5a19cf7d3569 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -351,7 +351,7 @@ static inline bool i915_priotree_signaled(const struct 
i915_priotree *pt)
        return i915_request_completed(rq);
  }
-void i915_retire_requests(struct drm_i915_private *i915);
+bool i915_retire_requests(struct drm_i915_private *i915);
/*
   * We treat requests as fences. This is not be to confused with our
diff --git a/drivers/gpu/drm/i915/i915_trace.h 
b/drivers/gpu/drm/i915/i915_trace.h
index 408827bf5d96..ac64d0a29664 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -948,7 +948,7 @@ DECLARE_EVENT_CLASS(i915_context,
        TP_fast_assign(
                        __entry->dev = ctx->i915->drm.primary->index;
                        __entry->ctx = ctx;
-                       __entry->hw_id = ctx->hw_id;
+                       __entry->hw_id = ctx->pin_hw_id ? ctx->hw_id : ~0u;
                        __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
        ),
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0ecd2e30fd13..fca08ee0eb53 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1109,6 +1109,10 @@ execlists_context_pin(struct intel_engine_cs *engine,
        if (ret)
                goto unpin_map;
+ ret = i915_gem_context_pin_hw_id(ctx);
+       if (ret)
+               goto unpin_ring;
+
        intel_lr_context_descriptor_update(ctx, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1120,6 +1124,8 @@ execlists_context_pin(struct intel_engine_cs *engine,
  out:
        return ce->ring;
+unpin_ring:
+       intel_ring_unpin(ce->ring);
  unpin_map:
        i915_gem_object_unpin_map(ce->state->obj);
  unpin_vma:
@@ -1140,6 +1146,7 @@ static void execlists_context_unpin(struct 
intel_engine_cs *engine,
        if (--ce->pin_count)
                return;
+ i915_gem_context_unpin_hw_id(ctx);
        intel_ring_unpin(ce->ring);
ce->state->obj->pin_global--;
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c 
b/drivers/gpu/drm/i915/selftests/mock_context.c
index 501becc47c0c..a3f35c2284df 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -42,12 +42,11 @@ mock_context(struct drm_i915_private *i915,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
+       INIT_LIST_HEAD(&ctx->hw_id_link);
- ret = ida_simple_get(&i915->contexts.hw_ida,
-                            0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+       ret = i915_gem_context_pin_hw_id(ctx);
        if (ret < 0)
                goto err_handles;
-       ctx->hw_id = ret;
if (name) {
                ctx->name = kstrdup(name, GFP_KERNEL);
@@ -78,11 +77,7 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
  {
-       INIT_LIST_HEAD(&i915->contexts.list);
-       ida_init(&i915->contexts.hw_ida);
-
-       INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
-       init_llist_head(&i915->contexts.free_list);
+       init_contexts(i915);
  }
struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c 
b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e6d4b882599a..6d0c0c3387e4 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -135,8 +135,6 @@ static struct dev_pm_domain pm_domain = {
  struct drm_i915_private *mock_gem_device(void)
  {
        struct drm_i915_private *i915;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
        struct pci_dev *pdev;
        int err;
@@ -232,24 +230,24 @@ struct drm_i915_private *mock_gem_device(void)
        }
mock_init_ggtt(i915);
+
+       i915->kernel_context = mock_context(i915, NULL);
+       if (!i915->kernel_context)
+               goto err_priorities;
+
        mutex_unlock(&i915->drm.struct_mutex);
mkwrite_device_info(i915)->ring_mask = BIT(0);
        i915->engine[RCS] = mock_engine(i915, "mock", RCS);
        if (!i915->engine[RCS])
-               goto err_priorities;
-
-       i915->kernel_context = mock_context(i915, NULL);
-       if (!i915->kernel_context)
-               goto err_engine;
+               goto err_contexts;
WARN_ON(i915_gemfs_init(i915)); return i915; -err_engine:
-       for_each_engine(engine, i915, id)
-               mock_engine_free(engine);
+err_contexts:
+       i915_gem_contexts_fini(i915);
  err_priorities:
        kmem_cache_destroy(i915->priorities);
  err_dependencies:


_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to