From: Tvrtko Ursulin <tvrtko.ursu...@intel.com>

Track how many callers are explicitly waiting on a fence to signal and
allow querying that count via the new dma_fence_wait_count() API.

This provides infrastructure on top of which individual drivers can
implement generic "waitboost" concepts. Wait-boosting is any reactive
activity, such as raising the GPU clocks, undertaken while there are
active external waiters on a fence.
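
As an illustration (not part of this patch), a driver's clock management
code could periodically sample the count and boost while waiters are
present. A minimal sketch, where driver_boost_clocks() and the sampling
helper are hypothetical:

  #include <linux/dma-fence.h>

  /* Hypothetical driver hook, not provided by this patch. */
  void driver_boost_clocks(void);

  static void driver_sample_waiters(struct dma_fence *fence)
  {
          /* Raise clocks while someone is actively waiting on this fence. */
          if (!dma_fence_is_signaled(fence) && dma_fence_wait_count(fence))
                  driver_boost_clocks();
  }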

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
---
 drivers/dma-buf/dma-fence.c               | 102 ++++++++++++++++------
 drivers/gpu/drm/i915/gt/intel_engine_pm.c |   1 -
 include/linux/dma-fence.h                 |  14 +++
 3 files changed, 88 insertions(+), 29 deletions(-)

diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 0de0482cd36e..ed43290c0bdf 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -344,6 +344,24 @@ void __dma_fence_might_wait(void)
 }
 #endif
 
+static void incr_wait_count(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+       lockdep_assert_held(fence->lock);
+
+       __set_bit(DMA_FENCE_CB_FLAG_WAITCOUNT_BIT, &cb->flags);
+       fence->waitcount++;
+       WARN_ON_ONCE(!fence->waitcount);
+}
+
+static void decr_wait_count(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+       lockdep_assert_held(fence->lock);
+
+       if (__test_and_clear_bit(DMA_FENCE_CB_FLAG_WAITCOUNT_BIT, &cb->flags)) {
+               WARN_ON_ONCE(!fence->waitcount);
+               fence->waitcount--;
+       }
+}
 
 /**
  * dma_fence_signal_timestamp_locked - signal completion of a fence
@@ -384,6 +402,7 @@ int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
 
        list_for_each_entry_safe(cur, tmp, &cb_list, node) {
                INIT_LIST_HEAD(&cur->node);
+               decr_wait_count(fence, cur);
                cur->func(fence, cur);
        }
 
@@ -612,35 +631,15 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
        unsigned long flags;
 
        spin_lock_irqsave(fence->lock, flags);
+       fence->waitcount++;
+       WARN_ON_ONCE(!fence->waitcount);
        __dma_fence_enable_signaling(fence);
        spin_unlock_irqrestore(fence->lock, flags);
 }
 EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 
-/**
- * dma_fence_add_callback - add a callback to be called when the fence
- * is signaled
- * @fence: the fence to wait on
- * @cb: the callback to register
- * @func: the function to call
- *
- * Add a software callback to the fence. The caller should keep a reference to
- * the fence.
- *
- * @cb will be initialized by dma_fence_add_callback(), no initialization
- * by the caller is required. Any number of callbacks can be registered
- * to a fence, but a callback can only be registered to one fence at a time.
- *
- * If fence is already signaled, this function will return -ENOENT (and
- * *not* call the callback).
- *
- * Note that the callback can be called from an atomic context or irq context.
- *
- * Returns 0 in case of success, -ENOENT if the fence is already signaled
- * and -EINVAL in case of error.
- */
-int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
-                          dma_fence_func_t func)
+static int add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+                       dma_fence_func_t func, bool wait)
 {
        unsigned long flags;
        int ret = 0;
@@ -655,10 +654,15 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 
        spin_lock_irqsave(fence->lock, flags);
 
+       if (wait)
+               incr_wait_count(fence, cb);
+
        if (__dma_fence_enable_signaling(fence)) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
        } else {
+               if (test_bit(DMA_FENCE_CB_FLAG_WAITCOUNT_BIT, &cb->flags))
+                       decr_wait_count(fence, cb);
                INIT_LIST_HEAD(&cb->node);
                ret = -ENOENT;
        }
@@ -667,8 +671,44 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 
        return ret;
 }
+
+/**
+ * dma_fence_add_callback - add a callback to be called when the fence
+ * is signaled
+ * @fence: the fence to wait on
+ * @cb: the callback to register
+ * @func: the function to call
+ *
+ * Add a software callback to the fence. The caller should keep a reference to
+ * the fence.
+ *
+ * @cb will be initialized by dma_fence_add_callback(), no initialization
+ * by the caller is required. Any number of callbacks can be registered
+ * to a fence, but a callback can only be registered to one fence at a time.
+ *
+ * If fence is already signaled, this function will return -ENOENT (and
+ * *not* call the callback).
+ *
+ * Note that the callback can be called from an atomic context or irq context.
+ *
+ * Returns 0 in case of success, -ENOENT if the fence is already signaled
+ * and -EINVAL in case of error.
+ */
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+                          dma_fence_func_t func)
+{
+       return add_callback(fence, cb, func, false);
+}
 EXPORT_SYMBOL(dma_fence_add_callback);
 
+int dma_fence_add_wait_callback(struct dma_fence *fence,
+                               struct dma_fence_cb *cb,
+                               dma_fence_func_t func)
+{
+       return add_callback(fence, cb, func, true);
+}
+EXPORT_SYMBOL(dma_fence_add_wait_callback);
+
 /**
  * dma_fence_get_status - returns the status upon completion
  * @fence: the dma_fence to query
@@ -721,8 +761,10 @@ dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
        spin_lock_irqsave(fence->lock, flags);
 
        ret = !list_empty(&cb->node);
-       if (ret)
+       if (ret) {
+               decr_wait_count(fence, cb);
                list_del_init(&cb->node);
+       }
 
        spin_unlock_irqrestore(fence->lock, flags);
 
@@ -780,6 +822,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 
        cb.base.func = dma_fence_default_wait_cb;
        cb.task = current;
+       incr_wait_count(fence, &cb.base);
        list_add(&cb.base.node, &fence->cb_list);
 
        while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
@@ -796,8 +839,10 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
                        ret = -ERESTARTSYS;
        }
 
-       if (!list_empty(&cb.base.node))
+       if (!list_empty(&cb.base.node)) {
+               decr_wait_count(fence, &cb.base);
                list_del(&cb.base.node);
+       }
        __set_current_state(TASK_RUNNING);
 
 out:
@@ -875,8 +920,8 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
                struct dma_fence *fence = fences[i];
 
                cb[i].task = current;
-               if (dma_fence_add_callback(fence, &cb[i].base,
-                                          dma_fence_default_wait_cb)) {
+               if (dma_fence_add_wait_callback(fence, &cb[i].base,
+                                               dma_fence_default_wait_cb)) {
                        /* This fence is already signaled */
                        if (idx)
                                *idx = i;
@@ -957,6 +1002,7 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;
+       fence->waitcount = 0;
        fence->error = 0;
 
        trace_dma_fence_init(fence);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e971b153fda9..2693a0151a6b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -218,7 +218,6 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
                 * until the background request retirement running every
                 * second or two).
                 */
-               BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
                dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
                rq->duration.emitted = ktime_get();
        }
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 775cdc0b4f24..d0ed923e4545 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -92,6 +92,7 @@ struct dma_fence {
        u64 seqno;
        unsigned long flags;
        struct kref refcount;
+       unsigned int waitcount;
        int error;
 };
 
@@ -116,6 +117,11 @@ typedef void (*dma_fence_func_t)(struct dma_fence *fence,
 struct dma_fence_cb {
        struct list_head node;
        dma_fence_func_t func;
+       unsigned long flags;
+};
+
+enum dma_fence_cb_flag_bits {
+       DMA_FENCE_CB_FLAG_WAITCOUNT_BIT,
 };
 
 /**
@@ -377,6 +383,9 @@ signed long dma_fence_default_wait(struct dma_fence *fence,
 int dma_fence_add_callback(struct dma_fence *fence,
                           struct dma_fence_cb *cb,
                           dma_fence_func_t func);
+int dma_fence_add_wait_callback(struct dma_fence *fence,
+                               struct dma_fence_cb *cb,
+                               dma_fence_func_t func);
 bool dma_fence_remove_callback(struct dma_fence *fence,
                               struct dma_fence_cb *cb);
 void dma_fence_enable_sw_signaling(struct dma_fence *fence);
@@ -528,6 +537,11 @@ static inline int dma_fence_get_status_locked(struct dma_fence *fence)
 
 int dma_fence_get_status(struct dma_fence *fence);
 
+static inline unsigned int dma_fence_wait_count(struct dma_fence *fence)
+{
+       return fence->waitcount;
+}
+
 /**
  * dma_fence_set_error - flag an error condition on the fence
  * @fence: the dma_fence
-- 
2.34.1
