Background
==========
One of our latency-sensitive services reported random CPU pressure spikes.
After a thorough investigation, we identified the root cause. The key
kernel stacks are as follows:
- Task A
2026-02-14-16:53:40.938243: [CPU198] 2156302(bpftrace) cgrp:4019437 pod:4019253
find_kallsyms_symbol+142
module_address_lookup+104
kallsyms_lookup_buildid+203
kallsyms_lookup+20
print_rec+64
t_show+67
seq_read_iter+709
seq_read+165
vfs_read+165
ksys_read+103
__x64_sys_read+25
do_syscall_64+56
entry_SYSCALL_64_after_hwframe+100
This task (2156302, bpftrace) is reading the
/sys/kernel/tracing/available_filter_functions file to check whether a
function is traceable:
https://github.com/bpftrace/bpftrace/blob/master/src/tracefs/tracefs.h#L21
Reading the available_filter_functions file is time-consuming, as it
contains tens of thousands of functions:
$ cat /sys/kernel/tracing/available_filter_functions | wc -l
59221
$ time cat /sys/kernel/tracing/available_filter_functions > /dev/null
real 0m0.458s user 0m0.001s sys 0m0.457s
Consequently, the ftrace_lock is held by this task for an extended period.
- Other Tasks
2026-02-14-16:53:41.437094: [CPU79] 2156308(bpftrace) cgrp:4019437 pod:4019253
mutex_spin_on_owner+108
__mutex_lock.constprop.0+1132
__mutex_lock_slowpath+19
mutex_lock+56
t_start+51
seq_read_iter+250
seq_read+165
vfs_read+165
ksys_read+103
__x64_sys_read+25
do_syscall_64+56
entry_SYSCALL_64_after_hwframe+100
Since ftrace_lock is held by Task-A and Task-A is actively running on a
CPU, all other tasks waiting for the same lock will spin on their
respective CPUs. This leads to increased CPU pressure.
Reproduction
============
This issue can be reproduced simply by running
`cat available_filter_functions`.
- Single process reading available_filter_functions:
$ time cat /sys/kernel/tracing/available_filter_functions > /dev/null
real 0m0.458s user 0m0.001s sys 0m0.457s
- Six processes reading available_filter_functions simultaneously:
for i in `seq 0 5`; do
time cat /sys/kernel/tracing/available_filter_functions > /dev/null &
done
The results are as follows:
real 0m2.666s user 0m0.001s sys 0m2.557s
real 0m2.718s user 0m0.000s sys 0m2.655s
real 0m2.718s user 0m0.001s sys 0m2.600s
real 0m2.733s user 0m0.001s sys 0m2.554s
real 0m2.735s user 0m0.000s sys 0m2.573s
real 0m2.738s user 0m0.000s sys 0m2.664s
As more processes are added, the system time increases correspondingly.
Solution
========
One approach is to optimize the reading of available_filter_functions so
that ftrace_lock is held for as short a time as possible. However, that
does not address the underlying problem: whenever a mutex is held for a
long time by a running task, every waiter burns CPU in optimistic
spinning. Therefore, we consider an alternative solution that avoids
optimistic spinning for heavy mutexes that may be held for long durations.
Note that we do not want to disable CONFIG_MUTEX_SPIN_ON_OWNER entirely, as
that could lead to unexpected performance regressions.
In this patch, two new APIs are introduced to allow heavy locks to
selectively disable optimistic spinning.
slow_mutex_lock() - lock a mutex without optimistic spinning
slow_mutex_unlock() - unlock the slow mutex
- The result of this optimization
After applying this slow mutex to ftrace_lock and concurrently running six
processes, the results are as follows:
real 0m2.691s user 0m0.001s sys 0m0.458s
real 0m2.785s user 0m0.001s sys 0m0.467s
real 0m2.787s user 0m0.000s sys 0m0.469s
real 0m2.787s user 0m0.000s sys 0m0.466s
real 0m2.788s user 0m0.001s sys 0m0.468s
real 0m2.789s user 0m0.000s sys 0m0.471s
The system time remains similar to that of running a single process.
Signed-off-by: Yafang Shao <[email protected]>
---
include/linux/mutex.h | 4 ++++
kernel/locking/mutex.c | 41 ++++++++++++++++++++++++++++++++++-------
2 files changed, 38 insertions(+), 7 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ecaa0440f6ec..eed0e87c084c 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -189,11 +189,13 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
extern int __must_check _mutex_lock_killable(struct mutex *lock,
unsigned int subclass, struct lockdep_map *nest_lock)
__cond_acquires(0, lock);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
__acquires(lock);
+extern void slow_mutex_lock_nested(struct mutex *lock, unsigned int subclass);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
+#define slow_mutex_lock(lock) slow_mutex_lock_nested(lock, 0)
#define mutex_lock_nest_lock(lock, nest_lock) \
do { \
@@ -215,6 +217,7 @@ extern void mutex_lock(struct mutex *lock) __acquires(lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock)
__cond_acquires(0, lock);
extern int __must_check mutex_lock_killable(struct mutex *lock)
__cond_acquires(0, lock);
extern void mutex_lock_io(struct mutex *lock) __acquires(lock);
+extern void slow_mutex_lock(struct mutex *lock) __acquires(lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass)
mutex_lock_interruptible(lock)
@@ -247,6 +250,7 @@ extern int mutex_trylock(struct mutex *lock)
__cond_acquires(true, lock);
#endif
extern void mutex_unlock(struct mutex *lock) __releases(lock);
+#define slow_mutex_unlock(lock) mutex_unlock(lock)
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
__cond_acquires(true, lock);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 2a1d165b3167..5766d824b3fe 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -443,8 +443,11 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
*/
static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter, const bool slow)
 {
+	if (slow)
+		return false;
+
if (!waiter) {
/*
* The purpose of the mutex_can_spin_on_owner() function is
@@ -577,7 +580,8 @@ EXPORT_SYMBOL(ww_mutex_unlock);
static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
-		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx,
+		    const bool slow)
{
DEFINE_WAKE_Q(wake_q);
struct mutex_waiter waiter;
@@ -615,7 +619,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
if (__mutex_trylock(lock) ||
- mutex_optimistic_spin(lock, ww_ctx, NULL)) {
+ mutex_optimistic_spin(lock, ww_ctx, NULL, slow)) {
/* got the lock, yay! */
lock_acquired(&lock->dep_map, ip);
if (ww_ctx)
@@ -716,7 +720,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
* to run.
*/
clear_task_blocked_on(current, lock);
- if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
+ if (mutex_optimistic_spin(lock, ww_ctx, &waiter, slow))
break;
set_task_blocked_on(current, lock);
trace_contention_begin(lock, LCB_F_MUTEX);
@@ -773,14 +777,21 @@ static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip)
{
-	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
+	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false, false);
+}
+
+static int __sched
+__slow_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
+		  struct lockdep_map *nest_lock, unsigned long ip)
+{
+	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false, true);
}
static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
-	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
+	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true, false);
}
/**
@@ -861,11 +872,17 @@ mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
token = io_schedule_prepare();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    subclass, NULL, _RET_IP_, NULL, 0);
+			    subclass, NULL, _RET_IP_, NULL, 0, false);
io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+void __sched
+slow_mutex_lock_nested(struct mutex *lock, unsigned int subclass)
+{
+ __slow_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+}
+
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
@@ -923,6 +940,16 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
+#else
+
+void __sched slow_mutex_lock(struct mutex *lock)
+{
+ might_sleep();
+
+	if (!__mutex_trylock_fast(lock))
+		__slow_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+}
+
#endif
/*
--
2.47.3