Remove the IPI fallback code from membarrier that dealt with the very
infrequent case of cpumask memory allocation failure. Use GFP_KERNEL
rather than GFP_NOWAIT, and relax the blocking guarantees for the
expedited membarrier system call commands, allowing them to block while
waiting for memory to become available.

In addition, -ENOMEM can now be returned to user-space if the cpumask
memory allocation fails.
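
For illustration only (not part of this patch): a minimal user-space
sketch of how a caller might handle the new -ENOMEM return. It assumes
the usual syscall(2) wrapper, since glibc provides none, and the
recovery policy shown is just an example:

	#include <errno.h>
	#include <linux/membarrier.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int membarrier(int cmd, int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		/* Private expedited commands require prior registration. */
		if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0)) {
			perror("membarrier register");
			return 1;
		}
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0)) {
			if (errno == ENOMEM) {
				/*
				 * New with this patch: the kernel could
				 * not allocate the cpumask, so no
				 * barriers were issued. The recovery
				 * policy (retry, abort, ...) is up to
				 * the caller.
				 */
				fprintf(stderr, "membarrier: out of memory\n");
				return 1;
			}
			perror("membarrier");
			return 1;
		}
		return 0;
	}

Callers that cannot tolerate -ENOMEM can fall back to the non-expedited
MEMBARRIER_CMD_GLOBAL command, which does not take this allocation.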

Signed-off-by: Mathieu Desnoyers <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Russell King - ARM Linux admin <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Kirill Tkhai <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
---
 kernel/sched/membarrier.c | 61 ++++++++++++---------------------------
 1 file changed, 19 insertions(+), 42 deletions(-)

diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 8afbdf92be0a..1420c656e8f0 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -66,7 +66,6 @@ void membarrier_exec_mmap(struct mm_struct *mm)
 static int membarrier_global_expedited(void)
 {
        int cpu;
-       bool fallback = false;
        cpumask_var_t tmpmask;
 
        if (num_online_cpus() == 1)
@@ -78,15 +77,8 @@ static int membarrier_global_expedited(void)
         */
        smp_mb();       /* system call entry is not a mb. */
 
-       /*
-        * Expedited membarrier commands guarantee that they won't
-        * block, hence the GFP_NOWAIT allocation flag and fallback
-        * implementation.
-        */
-       if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-               /* Fallback for OOM. */
-               fallback = true;
-       }
+       if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
 
        cpus_read_lock();
        rcu_read_lock();
@@ -117,18 +109,15 @@ static int membarrier_global_expedited(void)
                if (p->flags & PF_KTHREAD)
                        continue;
 
-               if (!fallback)
-                       __cpumask_set_cpu(cpu, tmpmask);
-               else
-                       smp_call_function_single(cpu, ipi_mb, NULL, 1);
+               __cpumask_set_cpu(cpu, tmpmask);
        }
        rcu_read_unlock();
-       if (!fallback) {
-               preempt_disable();
-               smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-               preempt_enable();
-               free_cpumask_var(tmpmask);
-       }
+
+       preempt_disable();
+       smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+       preempt_enable();
+
+       free_cpumask_var(tmpmask);
        cpus_read_unlock();
 
        /*
@@ -143,7 +132,6 @@ static int membarrier_global_expedited(void)
 static int membarrier_private_expedited(int flags)
 {
        int cpu;
-       bool fallback = false;
        cpumask_var_t tmpmask;
        struct mm_struct *mm = current->mm;
 
@@ -168,15 +156,8 @@ static int membarrier_private_expedited(int flags)
         */
        smp_mb();       /* system call entry is not a mb. */
 
-       /*
-        * Expedited membarrier commands guarantee that they won't
-        * block, hence the GFP_NOWAIT allocation flag and fallback
-        * implementation.
-        */
-       if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-               /* Fallback for OOM. */
-               fallback = true;
-       }
+       if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
 
        cpus_read_lock();
        rcu_read_lock();
@@ -194,20 +175,16 @@ static int membarrier_private_expedited(int flags)
                if (cpu == raw_smp_processor_id())
                        continue;
                p = task_rcu_dereference(&cpu_rq(cpu)->curr);
-               if (p && p->mm == mm) {
-                       if (!fallback)
-                               __cpumask_set_cpu(cpu, tmpmask);
-                       else
-                               smp_call_function_single(cpu, ipi_mb, NULL, 1);
-               }
+               if (p && p->mm == mm)
+                       __cpumask_set_cpu(cpu, tmpmask);
        }
        rcu_read_unlock();
-       if (!fallback) {
-               preempt_disable();
-               smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-               preempt_enable();
-               free_cpumask_var(tmpmask);
-       }
+
+       preempt_disable();
+       smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+       preempt_enable();
+
+       free_cpumask_var(tmpmask);
        cpus_read_unlock();
 
        /*
-- 
2.17.1
