This patch, or

commit 20311bd35060435badba8a0d46b06d5d184abaf7
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Mon Nov 14 20:41:03 2016 +0000

    drm/i915/scheduler: Execute requests in order of priorities

triggers sparse warnings. It makes me unhappy to see the sparse
warnings accumulate, because that will eventually render sparse useless.

The warnings are quoted inline below, next to the code being warned about.
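
For reference, sparse's context tracking works from the
__acquires()/__releases() annotations in <linux/compiler.h>: they declare
that a function enters or leaves a lock context, and sparse then checks
that every path through each function balances. A minimal sketch of the
mechanism (plain spinlock for clarity, not i915 code):

#include <linux/spinlock.h>

static void take(spinlock_t *lock) __acquires(lock)
{
        spin_lock(lock);                /* held on all return paths: OK */
}

static void drop(spinlock_t *lock) __releases(lock)
{
        spin_unlock(lock);              /* released on all paths: OK */
}

static void maybe_drop(spinlock_t *lock, bool cond)
{
        if (cond)
                spin_unlock(lock);      /* released on only some paths:
                                         * this is the "context imbalance"
                                         * sparse reports */
}

The annotations are unconditional by design, which is why code that takes
or drops a lock on only some paths ends up warning.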

BR,
Jani.


On Mon, 14 Nov 2016, Chris Wilson <ch...@chris-wilson.co.uk> wrote:
> @@ -634,13 +667,112 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
>       /* Will be called from irq-context when using foreign fences. */
>       spin_lock_irqsave(&engine->timeline->lock, flags);
>  
> -     list_add_tail(&request->execlist_link, &engine->execlist_queue);
> +     if (insert_request(&request->priotree, &engine->execlist_queue))
> +             engine->execlist_first = &request->priotree.node;
>       if (execlists_elsp_idle(engine))
>               tasklet_hi_schedule(&engine->irq_tasklet);
>  
>       spin_unlock_irqrestore(&engine->timeline->lock, flags);
>  }
>  
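For readers without the whole patch at hand: going by the callers,
insert_request() is a priority-sorted rbtree insert that reports whether
the new node became the leftmost, so the caller can cache
engine->execlist_first. A sketch of that shape (my reconstruction; the
actual helper lives elsewhere in the patch):

#include <linux/rbtree.h>

static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
{
        struct rb_node **p = &root->rb_node, *rb = NULL;
        bool first = true;

        /* Most positive priority is scheduled first; equal priorities
         * keep FIFO order by descending to the right.
         */
        while (*p) {
                struct i915_priotree *pos;

                rb = *p;
                pos = rb_entry(rb, typeof(*pos), node);
                if (pt->priority > pos->priority) {
                        p = &rb->rb_left;
                } else {
                        p = &rb->rb_right;
                        first = false;
                }
        }
        rb_link_node(&pt->node, rb, p);
        rb_insert_color(&pt->node, root);

        return first;   /* new leftmost, i.e. next to execute */
}
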
> +static struct intel_engine_cs *
> +pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
> +{
> +     struct intel_engine_cs *engine;
> +
> +     engine = container_of(pt,
> +                           struct drm_i915_gem_request,
> +                           priotree)->engine;
> +     if (engine != locked) {
> +             if (locked)
> +                     spin_unlock_irq(&locked->timeline->lock);

drivers/gpu/drm/i915/intel_lrc.c:688:40: warning: context imbalance in 'pt_lock_engine' - unexpected unlock

> +             spin_lock_irq(&engine->timeline->lock);
> +     }
> +
> +     return engine;
> +}
> +
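
The warning fires because the unlock in pt_lock_engine() is conditional:
sparse's annotation language can only say that a function unconditionally
acquires or releases a context, so no annotation fits a lock hand-off
like this. Reduced to its essence (single lock type, my simplification):

#include <linux/spinlock.h>

static spinlock_t *lock_handoff(spinlock_t *next, spinlock_t *prev)
{
        if (next != prev) {
                if (prev)
                        spin_unlock_irq(prev);  /* "unexpected unlock":
                                                 * sparse saw no matching
                                                 * lock in this function */
                spin_lock_irq(next);            /* and this acquire never
                                                 * has a matching unlock
                                                 * here either */
        }
        return next;
}
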
> +static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
> +{
> +     struct intel_engine_cs *engine = NULL;
> +     struct i915_dependency *dep, *p;
> +     struct i915_dependency stack;
> +     LIST_HEAD(dfs);
> +
> +     if (prio <= READ_ONCE(request->priotree.priority))
> +             return;
> +
> +     /* Need BKL in order to use the temporary link inside i915_dependency */
> +     lockdep_assert_held(&request->i915->drm.struct_mutex);
> +
> +     stack.signaler = &request->priotree;
> +     list_add(&stack.dfs_link, &dfs);
> +
> +     /* Recursively bump all dependent priorities to match the new request.
> +      *
> +      * A naive approach would be to use recursion:
> +      * static void update_priorities(struct i915_priotree *pt, prio) {
> +      *      list_for_each_entry(dep, &pt->signalers_list, signal_link)
> +      *              update_priorities(dep->signal, prio)
> +      *      insert_request(pt);
> +      * }
> +      * but that may have unlimited recursion depth and so runs a very
> +      * real risk of overrunning the kernel stack. Instead, we build
> +      * a flat list of all dependencies starting with the current request.
> +      * As we walk the list of dependencies, we add all of its dependencies
> +      * to the end of the list (this may include an already visited
> +      * request) and continue to walk onwards onto the new dependencies. The
> +      * end result is a topological list of requests in reverse order, the
> +      * last element in the list is the request we must execute first.
> +      */
> +     list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
> +             struct i915_priotree *pt = dep->signaler;
> +
> +             list_for_each_entry(p, &pt->signalers_list, signal_link)
> +                     if (prio > READ_ONCE(p->signaler->priority))
> +                             list_move_tail(&p->dfs_link, &dfs);
> +
> +             p = list_next_entry(dep, dfs_link);
> +             if (!RB_EMPTY_NODE(&pt->node))
> +                     continue;
> +
> +             engine = pt_lock_engine(pt, engine);
> +
> +             /* If it is not already in the rbtree, we can update the
> +              * priority inplace and skip over it (and its dependencies)
> +              * if it is referenced *again* as we descend the dfs.
> +              */
> +             if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
> +                     pt->priority = prio;
> +                     list_del_init(&dep->dfs_link);
> +             }
> +     }
> +
> +     /* Fifo and depth-first replacement ensure our deps execute before us */
> +     list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
> +             struct i915_priotree *pt = dep->signaler;
> +
> +             INIT_LIST_HEAD(&dep->dfs_link);
> +
> +             engine = pt_lock_engine(pt, engine);
> +
> +             if (prio <= pt->priority)
> +                     continue;
> +
> +             GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
> +
> +             pt->priority = prio;
> +             rb_erase(&pt->node, &engine->execlist_queue);
> +             if (insert_request(pt, &engine->execlist_queue))
> +                     engine->execlist_first = &pt->node;
> +     }
> +
> +     if (engine)
> +             spin_unlock_irq(&engine->timeline->lock);

drivers/gpu/drm/i915/intel_lrc.c:771:32: warning: context imbalance in 'execlists_schedule' - unexpected unlock



-- 
Jani Nikula, Intel Open Source Technology Center