This is a preparatory refactoring for the next commit, which adds
contended_release tracepoint instrumentation and needs to invoke the
unlock logic from both the traced and non-traced paths.
No functional change.

Signed-off-by: Dmitry Ilvokhin <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
---
 include/asm-generic/qrwlock.h | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 75b8f4601b28..4b627bafba8b 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -101,16 +101,26 @@ static inline void queued_write_lock(struct qrwlock *lock)
 		queued_write_lock_slowpath(lock);
 }
 
+static __always_inline void __queued_read_unlock(struct qrwlock *lock)
+{
+	/*
+	 * Atomically decrement the reader count
+	 */
+	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
+}
+
 /**
  * queued_read_unlock - release read lock of a queued rwlock
  * @lock : Pointer to queued rwlock structure
  */
 static inline void queued_read_unlock(struct qrwlock *lock)
 {
-	/*
-	 * Atomically decrement the reader count
-	 */
-	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
+	__queued_read_unlock(lock);
+}
+
+static __always_inline void __queued_write_unlock(struct qrwlock *lock)
+{
+	smp_store_release(&lock->wlocked, 0);
 }
 
 /**
@@ -119,7 +129,7 @@ static inline void queued_read_unlock(struct qrwlock *lock)
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	smp_store_release(&lock->wlocked, 0);
+	__queued_write_unlock(lock);
 }
 
 /**
-- 
2.52.0
