Here is a small optimization to SMR's naive grace period mechanism.
It makes use of the fact that some CPUs may cross a quiescent state
independently while the SMR thread is running smr_grace_wait().
Such CPUs can be skipped.

Index: kern/kern_smr.c
===================================================================
RCS file: src/sys/kern/kern_smr.c,v
retrieving revision 1.8
diff -u -p -r1.8 kern_smr.c
--- kern/kern_smr.c     3 Apr 2020 03:36:56 -0000       1.8
+++ kern/kern_smr.c     3 Dec 2020 17:05:22 -0000
@@ -41,6 +41,7 @@ struct smr_entry_list smr_deferred;
 struct timeout         smr_wakeup_tmo;
 unsigned int           smr_expedite;
 unsigned int           smr_ndeferred;
+unsigned char          smr_grace_period;
 
 #ifdef WITNESS
 static const char smr_lock_name[] = "smr";
@@ -131,20 +132,27 @@ smr_thread(void *arg)
 }
 
 /*
- * Block until all CPUs have crossed quiescent state.
+ * Announce next grace period and wait until all CPUs have entered it
+ * by crossing quiescent state.
  */
 void
 smr_grace_wait(void)
 {
 #ifdef MULTIPROCESSOR
        CPU_INFO_ITERATOR cii;
-       struct cpu_info *ci, *ci_start;
+       struct cpu_info *ci;
+       unsigned char smrgp;
+
+       smrgp = READ_ONCE(smr_grace_period) + 1;
+       WRITE_ONCE(smr_grace_period, smrgp);
+
+       curcpu()->ci_schedstate.spc_smrgp = smrgp;
 
-       ci_start = curcpu();
        CPU_INFO_FOREACH(cii, ci) {
-               if (ci == ci_start)
+               if (READ_ONCE(ci->ci_schedstate.spc_smrgp) == smrgp)
                        continue;
                sched_peg_curproc(ci);
+               KASSERT(ci->ci_schedstate.spc_smrgp == smrgp);
        }
        atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
 #endif /* MULTIPROCESSOR */
@@ -209,11 +217,23 @@ void
 smr_idle(void)
 {
        struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
+       unsigned char smrgp;
 
        SMR_ASSERT_NONCRITICAL();
 
        if (spc->spc_ndeferred > 0)
                smr_dispatch(spc);
+
+       /*
+        * Update this CPU's view of the system's grace period.
+        * The update must become visible after any preceding reads
+        * of SMR-protected data.
+        */
+       smrgp = READ_ONCE(smr_grace_period);
+       if (__predict_false(spc->spc_smrgp != smrgp)) {
+               membar_exit();
+               WRITE_ONCE(spc->spc_smrgp, smrgp);
+       }
 }
 
 void
Index: sys/sched.h
===================================================================
RCS file: src/sys/sys/sched.h,v
retrieving revision 1.56
diff -u -p -r1.56 sched.h
--- sys/sched.h 21 Oct 2019 10:24:01 -0000      1.56
+++ sys/sched.h 3 Dec 2020 17:05:22 -0000
@@ -119,6 +119,7 @@ struct schedstate_percpu {
        u_int spc_smrdepth;             /* level of smr nesting */
        u_char spc_smrexpedite;         /* if set, dispatch smr entries
                                         * without delay */
+       u_char spc_smrgp;               /* this CPU's view of grace period */
 };
 
 struct cpustats {
