On 7/31/25 11:48, Nysal Jan K.A. wrote:
Add a lock contention tracepoint in the queued spinlock slowpath.
Also add the __lockfunc annotation so that in_lock_functions()
works as expected.
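
For context: __lockfunc places a function into the .spinlock.text
section, and in_lock_functions() is essentially an address-range check
against that section's linker-provided bounds. Roughly (a simplified
sketch of the generic helper in kernel/locking/spinlock.c):

int in_lock_functions(unsigned long addr)
{
	/* Section bounds supplied by the linker script */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	    && addr <  (unsigned long)__lock_text_end;
}

Without the annotation, the powerpc slowpath would presumably fall
outside these bounds, so callers of in_lock_functions() would not
recognize samples landing in it as lock code.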


There is a bit of pure code movement: the early returns become an if/else so that control always falls through to trace_contention_end(). Given that it is small, a single patch is fine.

Signed-off-by: Nysal Jan K.A. <ny...@linux.ibm.com>

Tried the patch and was able to see the tracepoints.

Reviewed-by: Shrikanth Hegde <sshe...@linux.ibm.com>

---
  arch/powerpc/lib/qspinlock.c | 19 ++++++++++---------
  1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index bcc7e4dff8c3..95ab4cdf582e 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -9,6 +9,7 @@
  #include <linux/sched/clock.h>
  #include <asm/qspinlock.h>
  #include <asm/paravirt.h>
+#include <trace/events/lock.h>
  #define MAX_NODES 4

@@ -708,26 +709,26 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
        qnodesp->count--;
  }
-void queued_spin_lock_slowpath(struct qspinlock *lock)
+void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock)
  {
+       trace_contention_begin(lock, LCB_F_SPIN);
        /*
         * This looks funny, but it induces the compiler to inline both
         * sides of the branch rather than share code as when the condition
         * is passed as the paravirt argument to the functions.
         */
        if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
-               if (try_to_steal_lock(lock, true)) {
+               if (try_to_steal_lock(lock, true))
                        spec_barrier();
-                       return;
-               }
-               queued_spin_lock_mcs_queue(lock, true);
+               else
+                       queued_spin_lock_mcs_queue(lock, true);
        } else {
-               if (try_to_steal_lock(lock, false)) {
+               if (try_to_steal_lock(lock, false))
                        spec_barrier();
-                       return;
-               }
-               queued_spin_lock_mcs_queue(lock, false);
+               else
+                       queued_spin_lock_mcs_queue(lock, false);
        }
+       trace_contention_end(lock, 0);
  }
  EXPORT_SYMBOL(queued_spin_lock_slowpath);
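
The resulting pattern (a sketch, not the literal code above; the helper
names are hypothetical) is that begin/end now bracket every exit from
the slowpath, which is why the early returns had to go:

void __lockfunc some_lock_slowpath(struct qspinlock *lock)
{
	trace_contention_begin(lock, LCB_F_SPIN);	/* flags: spinning lock */

	if (steal_succeeded(lock))	/* hypothetical helper */
		post_steal_barrier();	/* hypothetical helper */
	else
		queue_and_wait(lock);	/* hypothetical helper */

	trace_contention_end(lock, 0);	/* ret 0: lock was acquired */
}

With this in place, tooling that consumes the contention_begin and
contention_end tracepoints, such as "perf lock contention", can account
for time spent in this slowpath.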

