On Wed, Apr 05, 2023 at 01:41:48PM +0200, Peter Zijlstra wrote:
> On Wed, Apr 05, 2023 at 01:10:07PM +0200, Frederic Weisbecker wrote:
> > On Wed, Apr 05, 2023 at 12:44:04PM +0200, Frederic Weisbecker wrote:
> > > On Tue, Apr 04, 2023 at 04:42:24PM +0300, Yair Podemsky wrote:
> > > > +       int state = atomic_read(&ct->state);
> > > > +       /* will return true only for cpus in kernel space */
> > > > +       return (state & CT_STATE_MASK) == CONTEXT_KERNEL;
> > > > +}
> > > 
> > > Also note that this doesn't strictly prevent userspace from being
> > > interrupted. You may well observe the CPU in kernel but it may
> > > receive the IPI later, after switching to userspace.
> > > 
> > > We could arrange to avoid that by marking ct->state with a pending
> > > work bit to flush upon user entry/exit, but that's a bit more
> > > overhead, so I first need to know about your expectations here,
> > > i.e.: can you tolerate such an occasional interruption or not?
> > 
> > Bah, actually what can we do to prevent that racy IPI? Not much, I
> > fear...
> 
> Yeah, so I don't think that's actually a problem. The premise is that
> *IFF* NOHZ_FULL stays in userspace, then it will never observe the IPI.
> 
> If it violates this by doing syscalls or other kernel entries; it gets
> to keep the pieces.

Ok so how about the following (only build tested)?

Two things:

1) It has the advantage to check context tracking _after_ the llist_add(), so
   it really can't be misused ordering-wise.

2) The IPI callback is always enqueued; the ordering then guarantees that
   it is either delivered via IPI or executed upon the target CPU's next
   kernel entry from userspace.

diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 4a4d56f77180..dc4b56da1747 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -137,10 +137,23 @@ static __always_inline int ct_state(void)
        return ret;
 }
 
+/*
+ * ct_state_cpu - read the context-tracking state of a remote CPU.
+ *
+ * Returns the CT_STATE_MASK bits of @cpu's context tracking state
+ * (e.g. CONTEXT_KERNEL or CONTEXT_USER), or CONTEXT_DISABLED when
+ * context tracking is compiled in but not enabled.
+ *
+ * The result is a racy snapshot: the remote CPU may change state right
+ * after the read, so callers must provide their own ordering — e.g. the
+ * llist_add() performed before this check in smp_call_function_many_cond(),
+ * paired with the flush on kernel entry from userspace.
+ */
+static __always_inline int ct_state_cpu(int cpu)
+{
+       struct context_tracking *ct;
+
+       /* Compiled in but not enabled: no state being tracked. */
+       if (!context_tracking_enabled())
+               return CONTEXT_DISABLED;
+
+       ct = per_cpu_ptr(&context_tracking, cpu);
+
+       return atomic_read(&ct->state) & CT_STATE_MASK;
+}
+
 #else
 static __always_inline bool context_tracking_enabled(void) { return false; }
 static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
 static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
+/* CONFIG_CONTEXT_TRACKING_USER=n stub: state is never tracked. */
+static inline int ct_state_cpu(int cpu) { return CONTEXT_DISABLED; }
 #endif /* CONFIG_CONTEXT_TRACKING_USER */
 
 #endif
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 846add8394c4..cdc7e8a59acc 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -10,6 +10,7 @@
 #include <linux/audit.h>
 #include <linux/tick.h>
 
+#include "../kernel/sched/smp.h"
 #include "common.h"
 
 #define CREATE_TRACE_POINTS
@@ -27,6 +28,10 @@ static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
        instrumentation_begin();
        kmsan_unpoison_entry_regs(regs);
        trace_hardirqs_off_finish();
+
+       /* Flush delayed IPI queue on nohz_full */
+       if (context_tracking_enabled_this_cpu())
+               flush_smp_call_function_queue();
        instrumentation_end();
 }
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 06a413987a14..14b25d25ef3a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -878,6 +878,8 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
  */
 #define SCF_WAIT       (1U << 0)
 #define SCF_RUN_LOCAL  (1U << 1)
+#define SCF_NO_USER    (1U << 2)
+
 
 static void smp_call_function_many_cond(const struct cpumask *mask,
                                        smp_call_func_t func, void *info,
@@ -946,10 +948,13 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 #endif
                        cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
                        if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
-                               __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
-                               nr_cpus++;
-                               last_cpu = cpu;
-
+                               if (!(scf_flags & SCF_NO_USER) ||
+                                   !IS_ENABLED(CONFIG_GENERIC_ENTRY) ||
+                                    ct_state_cpu(cpu) != CONTEXT_USER) {
+                                       __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
+                                       nr_cpus++;
+                                       last_cpu = cpu;
+                               }
                                cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
                        } else {
                                cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
@@ -1121,6 +1126,24 @@ void __init smp_init(void)
        smp_cpus_done(setup_max_cpus);
 }
 
+/*
+ * __on_each_cpu_cond_mask - common worker for the on_each_cpu_cond*() APIs.
+ *
+ * Translates the boolean @wait/@nouser arguments into SCF_* flags and
+ * calls smp_call_function_many_cond() with preemption disabled.
+ *
+ * @nouser: when true (SCF_NO_USER), CPUs observed in userspace are not
+ *          sent an IPI; their queued csd runs on the next kernel entry
+ *          instead (see the flush in __enter_from_user_mode()).
+ */
+static void __on_each_cpu_cond_mask(smp_cond_func_t cond_func,
+                                   smp_call_func_t func,
+                                   void *info, bool wait, bool nouser,
+                                   const struct cpumask *mask)
+{
+       unsigned int scf_flags = SCF_RUN_LOCAL;
+
+       if (wait)
+               scf_flags |= SCF_WAIT;
+
+       if (nouser)
+               scf_flags |= SCF_NO_USER;
+
+       preempt_disable();
+       smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
+       preempt_enable();
+}
+
 /*
  * on_each_cpu_cond(): Call a function on each processor for which
  * the supplied function cond_func returns true, optionally waiting
@@ -1146,17 +1169,18 @@ void __init smp_init(void)
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
 {
-       unsigned int scf_flags = SCF_RUN_LOCAL;
-
-       if (wait)
-               scf_flags |= SCF_WAIT;
-
-       preempt_disable();
-       smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
-       preempt_enable();
+       __on_each_cpu_cond_mask(cond_func, func, info, wait, false, mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
+/*
+ * on_each_cpu_cond_nouser_mask - like on_each_cpu_cond_mask(), except CPUs
+ * observed in userspace (per ct_state_cpu()) are not IPI'd; the callback is
+ * still queued and executes on that CPU's next kernel entry.
+ *
+ * NOTE(review): no EXPORT_SYMBOL here, unlike on_each_cpu_cond_mask() above,
+ * and no declaration is added to a header in this patch — confirm intended
+ * users can reach it.
+ */
+void on_each_cpu_cond_nouser_mask(smp_cond_func_t cond_func,
+                                 smp_call_func_t func,
+                                 void *info, bool wait,
+                                 const struct cpumask *mask)
+{
+       __on_each_cpu_cond_mask(cond_func, func, info, wait, true, mask);
+}
+
 static void do_nothing(void *unused)
 {
 }

Reply via email to