Move insert_request() earlier to avoid a forward declaration in a later
patch.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
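A note for reviewers, not part of the change: insert_request() is moved
verbatim. For anyone skimming it for the first time, it uses the usual
open-coded Linux rbtree insertion idiom. A minimal standalone sketch of
that idiom follows; the names (my_node, my_insert) are illustrative only
and do not appear in i915:

	#include <linux/rbtree.h>

	struct my_node {
		struct rb_node rb;
		int key;
	};

	/* Sketch only: insert 'item' with the largest key leftmost,
	 * mirroring how insert_request() keeps the highest priority
	 * at the front of the tree.
	 */
	static void my_insert(struct rb_root *root, struct my_node *item)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct my_node *cur;

			parent = *link;
			cur = rb_entry(parent, struct my_node, rb);
			if (item->key > cur->key)
				link = &parent->rb_left;
			else
				link = &parent->rb_right;
		}

		rb_link_node(&item->rb, parent, link);
		rb_insert_color(&item->rb, root);
	}

insert_request() differs from the sketch in one respect: rather than one
node per request, it keeps a single i915_priolist node per priority with
a FIFO list of requests hanging off it, so equal priorities never grow
the tree.
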
 drivers/gpu/drm/i915/intel_lrc.c | 128 +++++++++++++++++++--------------------
 1 file changed, 64 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ad61d1998fb7..1b2f0e3d383a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -292,6 +292,70 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
        return ctx->engine[engine->id].lrc_desc;
 }
 
+static bool
+insert_request(struct intel_engine_cs *engine,
+              struct i915_priotree *pt,
+              int prio)
+{
+       struct i915_priolist *p;
+       struct rb_node **parent, *rb;
+       bool first = true;
+
+       if (unlikely(engine->no_priolist))
+               prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+       /* most positive priority is scheduled first, equal priorities fifo */
+       rb = NULL;
+       parent = &engine->execlist_queue.rb_node;
+       while (*parent) {
+               rb = *parent;
+               p = rb_entry(rb, typeof(*p), node);
+               if (prio > p->priority) {
+                       parent = &rb->rb_left;
+               } else if (prio < p->priority) {
+                       parent = &rb->rb_right;
+                       first = false;
+               } else {
+                       list_add_tail(&pt->link, &p->requests);
+                       return false;
+               }
+       }
+
+       if (prio == I915_PRIORITY_NORMAL) {
+               p = &engine->default_priolist;
+       } else {
+               p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+               /* Convert an allocation failure to a priority bump */
+               if (unlikely(!p)) {
+                       prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+                       /* To maintain ordering with all rendering, after an
+                        * allocation failure we have to disable all scheduling.
+                        * Requests will then be executed in fifo, and schedule
+                        * will ensure that dependencies are emitted in fifo.
+                        * There will be still some reordering with existing
+                        * requests, so if userspace lied about their
+                        * dependencies that reordering may be visible.
+                        */
+                       engine->no_priolist = true;
+                       goto find_priolist;
+               }
+       }
+
+       p->priority = prio;
+       rb_link_node(&p->node, rb, parent);
+       rb_insert_color(&p->node, &engine->execlist_queue);
+
+       INIT_LIST_HEAD(&p->requests);
+       list_add_tail(&pt->link, &p->requests);
+
+       if (first)
+               engine->execlist_first = &p->node;
+
+       return first;
+}
+
 static inline void
 execlists_context_status_change(struct drm_i915_gem_request *rq,
                                unsigned long status)
@@ -649,70 +713,6 @@ static void intel_lrc_irq_handler(unsigned long data)
        intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 }
 
-static bool
-insert_request(struct intel_engine_cs *engine,
-              struct i915_priotree *pt,
-              int prio)
-{
-       struct i915_priolist *p;
-       struct rb_node **parent, *rb;
-       bool first = true;
-
-       if (unlikely(engine->no_priolist))
-               prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
-       /* most positive priority is scheduled first, equal priorities fifo */
-       rb = NULL;
-       parent = &engine->execlist_queue.rb_node;
-       while (*parent) {
-               rb = *parent;
-               p = rb_entry(rb, typeof(*p), node);
-               if (prio > p->priority) {
-                       parent = &rb->rb_left;
-               } else if (prio < p->priority) {
-                       parent = &rb->rb_right;
-                       first = false;
-               } else {
-                       list_add_tail(&pt->link, &p->requests);
-                       return false;
-               }
-       }
-
-       if (prio == I915_PRIORITY_NORMAL) {
-               p = &engine->default_priolist;
-       } else {
-               p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
-               /* Convert an allocation failure to a priority bump */
-               if (unlikely(!p)) {
-                       prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
-                       /* To maintain ordering with all rendering, after an
-                        * allocation failure we have to disable all scheduling.
-                        * Requests will then be executed in fifo, and schedule
-                        * will ensure that dependencies are emitted in fifo.
-                        * There will be still some reordering with existing
-                        * requests, so if userspace lied about their
-                        * dependencies that reordering may be visible.
-                        */
-                       engine->no_priolist = true;
-                       goto find_priolist;
-               }
-       }
-
-       p->priority = prio;
-       rb_link_node(&p->node, rb, parent);
-       rb_insert_color(&p->node, &engine->execlist_queue);
-
-       INIT_LIST_HEAD(&p->requests);
-       list_add_tail(&pt->link, &p->requests);
-
-       if (first)
-               engine->execlist_first = &p->node;
-
-       return first;
-}
-
 static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-- 
2.13.2
