---
drivers/dma-buf/dma-fence.c | 48 ++++++++++-----------
drivers/dma-buf/st-dma-fence.c | 6 ++-
drivers/dma-buf/sw_sync.c | 14 +++---
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 2 +-
drivers/gpu/drm/i915/i915_active.c | 19 ++++----
drivers/gpu/drm/nouveau/nouveau_drm.c | 5 ++-
drivers/gpu/drm/scheduler/sched_fence.c | 6 +--
drivers/gpu/drm/xe/xe_sched_job.c | 4 +-
include/linux/dma-fence.h | 38 ++++++++++++++++
11 files changed, 95 insertions(+), 55 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index ba02321bef0b..56aa59867eaa 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -365,7 +365,7 @@ void dma_fence_signal_timestamp_locked(struct dma_fence
*fence,
struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
- lockdep_assert_held(fence->lock);
+ dma_fence_assert_held(fence);
if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags)))
@@ -412,9 +412,9 @@ void dma_fence_signal_timestamp(struct dma_fence *fence,
ktime_t timestamp)
if (WARN_ON(!fence))
return;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
dma_fence_signal_timestamp_locked(fence, timestamp);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);
@@ -473,9 +473,9 @@ bool dma_fence_check_and_signal(struct dma_fence *fence)
unsigned long flags;
bool ret;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
ret = dma_fence_check_and_signal_locked(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
@@ -501,9 +501,9 @@ void dma_fence_signal(struct dma_fence *fence)
tmp = dma_fence_begin_signalling();
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
dma_fence_signal_timestamp_locked(fence, ktime_get());
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
dma_fence_end_signalling(tmp);
}
@@ -603,10 +603,10 @@ void dma_fence_release(struct kref *kref)
* don't leave chains dangling. We set the error flag first
* so that the callbacks know this signal is due to an error.
*/
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
fence->error = -EDEADLK;
dma_fence_signal_locked(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
ops = rcu_dereference(fence->ops);
@@ -636,7 +636,7 @@ static bool __dma_fence_enable_signaling(struct dma_fence
*fence)
const struct dma_fence_ops *ops;
bool was_set;
- lockdep_assert_held(fence->lock);
+ dma_fence_assert_held(fence);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
@@ -672,9 +672,9 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
__dma_fence_enable_signaling(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -714,8 +714,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
return -ENOENT;
}
- spin_lock_irqsave(fence->lock, flags);
-
+ dma_fence_lock_irqsave(fence, flags);
if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
@@ -723,8 +722,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct
dma_fence_cb *cb,
INIT_LIST_HEAD(&cb->node);
ret = -ENOENT;
}
-
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
@@ -747,9 +745,9 @@ int dma_fence_get_status(struct dma_fence *fence)
unsigned long flags;
int status;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
status = dma_fence_get_status_locked(fence);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return status;
}
@@ -779,13 +777,11 @@ dma_fence_remove_callback(struct dma_fence *fence, struct
dma_fence_cb *cb)
unsigned long flags;
bool ret;
- spin_lock_irqsave(fence->lock, flags);
-
+ dma_fence_lock_irqsave(fence, flags);
ret = !list_empty(&cb->node);
if (ret)
list_del_init(&cb->node);
-
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
@@ -824,7 +820,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr,
signed long timeout)
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (dma_fence_test_signaled_flag(fence))
goto out;
@@ -848,11 +844,11 @@ dma_fence_default_wait(struct dma_fence *fence, bool
intr, signed long timeout)
__set_current_state(TASK_INTERRUPTIBLE);
else
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
ret = schedule_timeout(ret);
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (ret > 0 && intr && signal_pending(current))
ret = -ERESTARTSYS;
}
@@ -862,7 +858,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr,
signed long timeout)
__set_current_state(TASK_RUNNING);
out:
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index 73ed6fd48a13..5d0d9abc6e21 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -410,8 +410,10 @@ struct race_thread {
static void __wait_for_callbacks(struct dma_fence *f)
{
- spin_lock_irq(f->lock);
- spin_unlock_irq(f->lock);
+ unsigned long flags;
+
+ dma_fence_lock_irqsave(f, flags);
+ dma_fence_unlock_irqrestore(f, flags);
}
static int thread_signal_callback(void *arg)
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 6f09d13be6b6..4c81a37dd682 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -156,12 +156,12 @@ static void timeline_fence_release(struct dma_fence
*fence)
struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (!list_empty(&pt->link)) {
list_del(&pt->link);
rb_erase(&pt->node, &parent->pt_tree);
}
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
sync_timeline_put(parent);
dma_fence_free(fence);
@@ -179,7 +179,7 @@ static void timeline_fence_set_deadline(struct dma_fence
*fence, ktime_t deadlin
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
if (ktime_before(deadline, pt->deadline))
pt->deadline = deadline;
@@ -187,7 +187,7 @@ static void timeline_fence_set_deadline(struct dma_fence
*fence, ktime_t deadlin
pt->deadline = deadline;
__set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
}
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
}
static const struct dma_fence_ops timeline_fence_ops = {
@@ -431,13 +431,13 @@ static int sw_sync_ioctl_get_deadline(struct
sync_timeline *obj, unsigned long a
goto put_fence;
}
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
ret = -ENOENT;
goto unlock;
}
data.deadline_ns = ktime_to_ns(pt->deadline);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
dma_fence_put(fence);
@@ -450,7 +450,7 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return 0;
unlock:
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
put_fence:
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b82357c65723..1404e1fe62a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -479,10 +479,10 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring,
unsigned int vmid,
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery ||
!fence)
return false;
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (!dma_fence_is_signaled_locked(fence))
dma_fence_set_error(fence, -ENODATA);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6a2ea200d90c..4761e7486811 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2802,8 +2802,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct
amdgpu_vm *vm)
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
/* Make sure that all fence callbacks have completed */
- spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
- spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
+ dma_fence_lock_irqsave(vm->last_tlb_flush, flags);
+ dma_fence_unlock_irqrestore(vm->last_tlb_flush, flags);
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index bf6117d5fc57..78ea2d9ccedf 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -148,7 +148,7 @@ __dma_fence_signal__notify(struct dma_fence *fence,
{
struct dma_fence_cb *cur, *tmp;
- lockdep_assert_held(fence->lock);
+ dma_fence_assert_held(fence);
list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
diff --git a/drivers/gpu/drm/i915/i915_active.c
b/drivers/gpu/drm/i915/i915_active.c
index 6b0c1162505a..9d41e052ab65 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -1045,9 +1045,10 @@ __i915_active_fence_set(struct i915_active_fence *active,
* nesting rules for the fence->lock; the inner lock is always the
* older lock.
*/
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (prev)
- spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ spin_lock_nested(dma_fence_spinlock(prev),
+ SINGLE_DEPTH_NESTING);
/*
* A does the cmpxchg first, and so it sees C or NULL, as before, or
@@ -1061,17 +1062,18 @@ __i915_active_fence_set(struct i915_active_fence
*active,
*/
while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
if (prev) {
- spin_unlock(prev->lock);
+ spin_unlock(dma_fence_spinlock(prev));
dma_fence_put(prev);
}
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
prev = i915_active_fence_get(active);
GEM_BUG_ON(prev == fence);
- spin_lock_irqsave(fence->lock, flags);
+ dma_fence_lock_irqsave(fence, flags);
if (prev)
- spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ spin_lock_nested(dma_fence_spinlock(prev),
+ SINGLE_DEPTH_NESTING);
}
/*
@@ -1088,10 +1090,11 @@ __i915_active_fence_set(struct i915_active_fence
*active,
*/
if (prev) {
__list_del_entry(&active->cb.node);
- spin_unlock(prev->lock); /* serialise with prev->cb_list */
+ /* serialise with prev->cb_list */
+ spin_unlock(dma_fence_spinlock(prev));
}
list_add_tail(&active->cb.node, &fence->cb_list);
- spin_unlock_irqrestore(fence->lock, flags);
+ dma_fence_unlock_irqrestore(fence, flags);
return prev;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c
b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1527b801f013..ec4dfa3ea725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -156,12 +156,13 @@ nouveau_name(struct drm_device *dev)
static inline bool
nouveau_cli_work_ready(struct dma_fence *fence)
{
+ unsigned long flags;
bool ret = true;
- spin_lock_irq(fence->lock);
+ dma_fence_lock_irqsave(fence, flags);
if (!dma_fence_is_signaled_locked(fence))
ret = false;
- spin_unlock_irq(fence->lock);
+ dma_fence_unlock_irqrestore(fence, flags);
if (ret == true)
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c
b/drivers/gpu/drm/scheduler/sched_fence.c
index 9391d6f0dc01..724d77694246 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -156,19 +156,19 @@ static void drm_sched_fence_set_deadline_finished(struct
dma_fence *f,
struct dma_fence *parent;
unsigned long flags;
- spin_lock_irqsave(&fence->lock, flags);
+ dma_fence_lock_irqsave(f, flags);
/* If we already have an earlier deadline, keep it: */
if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
ktime_before(fence->deadline, deadline)) {
- spin_unlock_irqrestore(&fence->lock, flags);
+ dma_fence_unlock_irqrestore(f, flags);
return;
}
fence->deadline = deadline;
set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
- spin_unlock_irqrestore(&fence->lock, flags);
+ dma_fence_unlock_irqrestore(f, flags);
/*
* smp_load_aquire() to ensure that if we are racing another
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c
b/drivers/gpu/drm/xe/xe_sched_job.c
index 3927666fe556..ae5b38b2a884 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -190,11 +190,11 @@ static bool xe_fence_set_error(struct dma_fence *fence,
int error)
unsigned long irq_flags;
bool signaled;
- spin_lock_irqsave(fence->lock, irq_flags);
+ dma_fence_lock_irqsave(fence, irq_flags);
signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
if (!signaled)
dma_fence_set_error(fence, error);
- spin_unlock_irqrestore(fence->lock, irq_flags);
+ dma_fence_unlock_irqrestore(fence, irq_flags);
return signaled;
}
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e1afbb5909f9..88c842fc35d5 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -377,6 +377,44 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
} while (1);
}
+/**
+ * dma_fence_spinlock - return pointer to the spinlock protecting the fence
+ * @fence: the fence to get the lock from
+ *
+ * Return a pointer to the spinlock protecting the fence.
+ */
+static inline spinlock_t *dma_fence_spinlock(struct dma_fence *fence)
+{
+ return fence->lock;
+}
+
+/**
+ * dma_fence_lock_irqsave - irqsave lock the fence
+ * @fence: the fence to lock
+ * @flags: where to store the CPU flags.
+ *
+ * Lock the fence, preventing it from changing to the signaled state.
+ */
+#define dma_fence_lock_irqsave(fence, flags) \
+	spin_lock_irqsave(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_unlock_irqrestore - unlock the fence and irqrestore
+ * @fence: the fence to unlock
+ * @flags: the CPU flags to restore
+ *
+ * Unlock the fence, allowing it to change its state to signaled again.
+ */
+#define dma_fence_unlock_irqrestore(fence, flags) \
+	spin_unlock_irqrestore(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_assert_held - lockdep assertion that fence is locked
+ * @fence: the fence which should be locked
+ */
+#define dma_fence_assert_held(fence) \
+	lockdep_assert_held(dma_fence_spinlock(fence))
+
#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);