Currently, the membarrier system call serializes all expedited commands
behind a single global mutex (`membarrier_ipi_mutex`). On large systems
this causes significant contention when multiple threads invoke
membarrier concurrently, even when they target different CPUs.
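
For reference, the serialized section of membarrier_private_expedited()
currently looks like this (condensed from the lines the diff below
removes); every caller, including one targeting a single CPU, takes the
same global mutex before sending any IPI:

    smp_mb();       /* system call entry is not a mb. */

    if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
            return -ENOMEM;

    SERIALIZE_IPI();        /* guard(mutex)(&membarrier_ipi_mutex) */
    cpus_read_lock();
    ...
    if (cpu_id >= 0)
            smp_call_function_single(cpu_id, ipi_func, NULL, 1);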

This contention becomes critical when combined with CFS bandwidth
throttling/unthrottling, during which interrupts can be disabled for
relatively long periods on the target CPUs. If membarrier is waiting
for an IPI response from such a CPU, it holds the global mutex the
whole time, blocking every other membarrier call on the system. This
cascade can lead to hard lockups when thousands of threads stall
waiting for the mutex.

Optimize `MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ` when a specific CPU is
targeted by introducing per-CPU mutexes. Broadcast commands and commands
without a specific CPU target continue to use the global mutex.
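
For context, the optimized case is a targeted invocation from
userspace. A minimal sketch (the target CPU number here is arbitrary,
and error handling is omitted):

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int membarrier(int cmd, unsigned int flags, int cpu_id)
    {
            return syscall(__NR_membarrier, cmd, flags, cpu_id);
    }

    int main(void)
    {
            /* One-time registration, required before the expedited command. */
            membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0);

            /*
             * Targets CPU 3 only; with this patch such a call takes that
             * CPU's mutex rather than the global membarrier_ipi_mutex.
             */
            membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
                       MEMBARRIER_CMD_FLAG_CPU, 3);
            return 0;
    }

Only this targeted form takes the new per-CPU mutex; calls without
MEMBARRIER_CMD_FLAG_CPU still serialize on membarrier_ipi_mutex.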

This prevents the cascade lockup scenario. As measured by the stress
test introduced in the subsequent patch, on an AMD Turin machine with
384 CPUs (2 NUMA nodes, SMT=2), this optimization yields a 200x
improvement in throughput.

Signed-off-by: Aniket Gattani <[email protected]>
---
Changes in v3:
- Fixed the code path when `cpu_id < 0` in membarrier_private_expedited,
  as reported by Peter.
  Link: https://lore.kernel.org/lkml/[email protected]/

 kernel/sched/membarrier.c | 77 ++++++++++++++++++++++-----------------
 1 file changed, 44 insertions(+), 33 deletions(-)

diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 623445603725..3d88e900a17f 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -164,8 +164,26 @@
        | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK                     \
        | MEMBARRIER_CMD_GET_REGISTRATIONS)
 
+/*
+ * Scoped guard for memory barriers on entry and exit.
+ * Matches memory barriers before & after rq->curr modification in scheduler.
+ */
+DEFINE_LOCK_GUARD_0(mb, smp_mb(), smp_mb())
 static DEFINE_MUTEX(membarrier_ipi_mutex);
+static DEFINE_PER_CPU(struct mutex, membarrier_cpu_mutexes);
+
 #define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)
+#define SERIALIZE_IPI_CPU(cpu_id) guard(mutex)(&per_cpu(membarrier_cpu_mutexes, cpu_id))
+
+static int __init membarrier_init(void)
+{
+       int i;
+
+       for_each_possible_cpu(i)
+               mutex_init(&per_cpu(membarrier_cpu_mutexes, i));
+       return 0;
+}
+core_initcall(membarrier_init);
 
 static void ipi_mb(void *info)
 {
@@ -315,7 +333,6 @@ static int membarrier_global_expedited(void)
 
 static int membarrier_private_expedited(int flags, int cpu_id)
 {
-       cpumask_var_t tmpmask;
        struct mm_struct *mm = current->mm;
        smp_call_func_t ipi_func = ipi_mb;
 
@@ -352,30 +369,45 @@ static int membarrier_private_expedited(int flags, int cpu_id)
         * On RISC-V, this barrier pairing is also needed for the
         * SYNC_CORE command when switching between processes, cf.
         * the inline comments in membarrier_arch_switch_mm().
+        *
+        * Memory barrier on the caller thread _after_ we finished
+        * waiting for the last IPI. Matches memory barriers before
+        * rq->curr modification in scheduler.
         */
-       smp_mb();       /* system call entry is not a mb. */
-
-       if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
-               return -ENOMEM;
-
-       SERIALIZE_IPI();
-       cpus_read_lock();
-
+       guard(mb)();
        if (cpu_id >= 0) {
+               if (cpu_id >= nr_cpu_ids || !cpu_possible(cpu_id))
+                       return 0;
+
+               SERIALIZE_IPI_CPU(cpu_id);
+               guard(cpus_read_lock)();
                struct task_struct *p;
 
-               if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
-                       goto out;
+               if (!cpu_online(cpu_id))
+                       return 0;
+
                rcu_read_lock();
                p = rcu_dereference(cpu_rq(cpu_id)->curr);
                if (!p || p->mm != mm) {
                        rcu_read_unlock();
-                       goto out;
+                       return 0;
                }
                rcu_read_unlock();
+               /*
+                * smp_call_function_single() will call ipi_func() if cpu_id
+                * is the calling CPU.
+                */
+               smp_call_function_single(cpu_id, ipi_func, NULL, 1);
        } else {
+               cpumask_var_t __free(free_cpumask_var) tmpmask = CPUMASK_VAR_NULL;
                int cpu;
 
+               if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+                       return -ENOMEM;
+
+               SERIALIZE_IPI();
+               guard(cpus_read_lock)();
+
                rcu_read_lock();
                for_each_online_cpu(cpu) {
                        struct task_struct *p;
@@ -385,15 +417,6 @@ static int membarrier_private_expedited(int flags, int cpu_id)
                                __cpumask_set_cpu(cpu, tmpmask);
                }
                rcu_read_unlock();
-       }
-
-       if (cpu_id >= 0) {
-               /*
-                * smp_call_function_single() will call ipi_func() if cpu_id
-                * is the calling CPU.
-                */
-               smp_call_function_single(cpu_id, ipi_func, NULL, 1);
-       } else {
                /*
                 * For regular membarrier, we can save a few cycles by
                 * skipping the current cpu -- we're about to do smp_mb()
@@ -420,18 +443,6 @@ static int membarrier_private_expedited(int flags, int cpu_id)
                }
        }
 
-out:
-       if (cpu_id < 0)
-               free_cpumask_var(tmpmask);
-       cpus_read_unlock();
-
-       /*
-        * Memory barrier on the caller thread _after_ we finished
-        * waiting for the last IPI. Matches memory barriers before
-        * rq->curr modification in scheduler.
-        */
-       smp_mb();       /* exit from system call is not a mb */
-
        return 0;
 }
 
-- 
2.54.0.545.g6539524ca2-goog

