On Tue, 2026-02-10 at 11:01 +0100, Christian König wrote: > At first glance it is counterintuitive to protect a constant function > pointer table by RCU, but this allows modules providing the function > table to unload by waiting for an RCU grace period.
I think that someone who does not already have a deep understanding about dma-buf and fences will have much trouble understanding *why* this patch is in the log and *what it achieves*. Good commit messages are at least as important as good code. In drm/sched for example I've been trying so many times to figure out why certain hacks and changes were implemented, but all that git-blame ever gave me was one liners, often hinting at some driver internal work around ._. > > v2: make one the now duplicated lockdep warnings a comment instead. > v3: Add more documentation to ->wait and ->release callback. > v4: fix typo in documentation > v5: rebased on drm-tip > > Signed-off-by: Christian König <[email protected]> > --- > drivers/dma-buf/dma-fence.c | 69 +++++++++++++++++++++++++------------ > include/linux/dma-fence.h | 29 ++++++++++++++-- > 2 files changed, 73 insertions(+), 25 deletions(-) > > diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c > index e05beae6e407..de9bf18be3d4 100644 > --- a/drivers/dma-buf/dma-fence.c > +++ b/drivers/dma-buf/dma-fence.c > @@ -522,6 +522,7 @@ EXPORT_SYMBOL(dma_fence_signal); > signed long > dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long > timeout) > { > + const struct dma_fence_ops *ops; > signed long ret; > > if (WARN_ON(timeout < 0)) > @@ -533,15 +534,21 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool > intr, signed long timeout) > > dma_fence_enable_sw_signaling(fence); > > - if (trace_dma_fence_wait_start_enabled()) { Why can wait_start_enabled() be removed? Is that related to the life time decoupling or is it a separate topic? 
> - rcu_read_lock(); > - trace_dma_fence_wait_start(fence); > + rcu_read_lock(); > + ops = rcu_dereference(fence->ops); > + trace_dma_fence_wait_start(fence); > + if (ops->wait) { > + /* > + * Implementing the wait ops is deprecated and not supported for > + * issuer independent fences, so it is ok to use the ops outside s/issuer/issuers of And how do we know that this here is an independent fence? What even is an "independent fence" – one with internal spinlock? > + * the RCU protected section. > + */ > + rcu_read_unlock(); > + ret = ops->wait(fence, intr, timeout); > + } else { > rcu_read_unlock(); > - } > - if (fence->ops->wait) > - ret = fence->ops->wait(fence, intr, timeout); > - else > ret = dma_fence_default_wait(fence, intr, timeout); > + } The git diff here looks awkward. Do you use git format-patch -- histogram? > if (trace_dma_fence_wait_end_enabled()) { > rcu_read_lock(); > trace_dma_fence_wait_end(fence); > @@ -562,6 +569,7 @@ void dma_fence_release(struct kref *kref) > { > struct dma_fence *fence = > container_of(kref, struct dma_fence, refcount); > + const struct dma_fence_ops *ops; > > rcu_read_lock(); > trace_dma_fence_destroy(fence); > @@ -593,12 +601,12 @@ void dma_fence_release(struct kref *kref) > spin_unlock_irqrestore(fence->lock, flags); > } > > - rcu_read_unlock(); > - > - if (fence->ops->release) > - fence->ops->release(fence); > + ops = rcu_dereference(fence->ops); > + if (ops->release) > + ops->release(fence); > else > dma_fence_free(fence); > + rcu_read_unlock(); > } > EXPORT_SYMBOL(dma_fence_release); > > @@ -617,6 +625,7 @@ EXPORT_SYMBOL(dma_fence_free); > > static bool __dma_fence_enable_signaling(struct dma_fence *fence) > { > + const struct dma_fence_ops *ops; > bool was_set; > > lockdep_assert_held(fence->lock); > @@ -627,14 +636,18 @@ static bool __dma_fence_enable_signaling(struct > dma_fence *fence) > if (dma_fence_test_signaled_flag(fence)) > return false; > > - if (!was_set && fence->ops->enable_signaling) { > + 
rcu_read_lock(); > + ops = rcu_dereference(fence->ops); > + if (!was_set && ops->enable_signaling) { > trace_dma_fence_enable_signal(fence); > > - if (!fence->ops->enable_signaling(fence)) { > + if (!ops->enable_signaling(fence)) { > + rcu_read_unlock(); > dma_fence_signal_locked(fence); > return false; > } > } > + rcu_read_unlock(); > > return true; > } > @@ -1007,8 +1020,13 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout); > */ > void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) > { > - if (fence->ops->set_deadline && !dma_fence_is_signaled(fence)) > - fence->ops->set_deadline(fence, deadline); > + const struct dma_fence_ops *ops; > + > + rcu_read_lock(); > + ops = rcu_dereference(fence->ops); > + if (ops->set_deadline && !dma_fence_is_signaled(fence)) > + ops->set_deadline(fence, deadline); > + rcu_read_unlock(); > } > EXPORT_SYMBOL(dma_fence_set_deadline); > > @@ -1049,7 +1067,12 @@ __dma_fence_init(struct dma_fence *fence, const struct > dma_fence_ops *ops, > BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); > > kref_init(&fence->refcount); > - fence->ops = ops; > + /* > + * At first glance it is counter intuitive to protect a constant > + * function pointer table by RCU, but this allows modules providing the > + * function table to unload by waiting for an RCU grace period. Maybe add a sentence like "Fences can live longer than the module which issued them." 
> + */ > + RCU_INIT_POINTER(fence->ops, ops); > INIT_LIST_HEAD(&fence->cb_list); > fence->lock = lock; > fence->context = context; > @@ -1129,11 +1152,12 @@ EXPORT_SYMBOL(dma_fence_init64); > */ > const char __rcu *dma_fence_driver_name(struct dma_fence *fence) > { > - RCU_LOCKDEP_WARN(!rcu_read_lock_held(), > - "RCU protection is required for safe access to > returned string"); > + const struct dma_fence_ops *ops; > > + /* RCU protection is required for safe access to returned string */ > + ops = rcu_dereference(fence->ops); > if (!dma_fence_test_signaled_flag(fence)) > - return (const char __rcu *)fence->ops->get_driver_name(fence); > + return (const char __rcu *)ops->get_driver_name(fence); > else > return (const char __rcu *)"detached-driver"; > } > @@ -1161,11 +1185,12 @@ EXPORT_SYMBOL(dma_fence_driver_name); > */ > const char __rcu *dma_fence_timeline_name(struct dma_fence *fence) > { > - RCU_LOCKDEP_WARN(!rcu_read_lock_held(), > - "RCU protection is required for safe access to > returned string"); > + const struct dma_fence_ops *ops; > > + /* RCU protection is required for safe access to returned string */ > + ops = rcu_dereference(fence->ops); > if (!dma_fence_test_signaled_flag(fence)) > - return (const char __rcu *)fence->ops->get_driver_name(fence); > + return (const char __rcu *)ops->get_driver_name(fence); > else > return (const char __rcu *)"signaled-timeline"; > } Did we make any progress in our conversation about removing those two functions and callbacks? They're only used by i915. > diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h > index 9c4d25289239..6bf4feb0e01f 100644 > --- a/include/linux/dma-fence.h > +++ b/include/linux/dma-fence.h > @@ -67,7 +67,7 @@ struct seq_file; > */ > struct dma_fence { > spinlock_t *lock; > - const struct dma_fence_ops *ops; > + const struct dma_fence_ops __rcu *ops; > /* > * We clear the callback list on kref_put so that by the time we > * release the fence it is unused. 
No one should be adding to the > @@ -220,6 +220,10 @@ struct dma_fence_ops { > * timed out. Can also return other error values on custom > implementations, > * which should be treated as if the fence is signaled. For example a > hardware > * lockup could be reported like that. > + * > + * Implementing this callback prevents the fence from detaching after > + * signaling and so it is mandatory for the module providing the s/mandatory/necessary ? > + * dma_fence_ops to stay loaded as long as the dma_fence exists. > */ > signed long (*wait)(struct dma_fence *fence, > bool intr, signed long timeout); > @@ -231,6 +235,13 @@ struct dma_fence_ops { > * Can be called from irq context. This callback is optional. If it is > * NULL, then dma_fence_free() is instead called as the default > * implementation. > + * > + * Implementing this callback prevents the fence from detaching after > + * signaling and so it is mandatory for the module providing the same > + * dma_fence_ops to stay loaded as long as the dma_fence exists. > + * > + * If the callback is implemented the memory backing the dma_fence > + * object must be freed RCU safe. > */ > void (*release)(struct dma_fence *fence); > > @@ -454,13 +465,19 @@ dma_fence_test_signaled_flag(struct dma_fence *fence) > static inline bool > dma_fence_is_signaled_locked(struct dma_fence *fence) > { > + const struct dma_fence_ops *ops; > + > if (dma_fence_test_signaled_flag(fence)) > return true; > > - if (fence->ops->signaled && fence->ops->signaled(fence)) { > + rcu_read_lock(); > + ops = rcu_dereference(fence->ops); > + if (ops->signaled && ops->signaled(fence)) { Maybe you can educate me a bit about RCU here – couldn't this still race? If the ops were unloaded before you take rcu_read_lock(), rcu_dereference() would give you an invalid pointer here since you don't check for !ops, no? 
> + rcu_read_unlock(); > dma_fence_signal_locked(fence); > return true; > } > + rcu_read_unlock(); > > return false; > } > @@ -484,13 +501,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence) > static inline bool > dma_fence_is_signaled(struct dma_fence *fence) > { > + const struct dma_fence_ops *ops; > + > if (dma_fence_test_signaled_flag(fence)) > return true; > > - if (fence->ops->signaled && fence->ops->signaled(fence)) { > + rcu_read_lock(); > + ops = rcu_dereference(fence->ops); > + if (ops->signaled && ops->signaled(fence)) { Same question as above applies here. Danke, P. > + rcu_read_unlock(); > dma_fence_signal(fence); > return true; > } > + rcu_read_unlock(); > > return false; > }
