Author: mjg
Date: Sat Nov 30 17:24:42 2019
New Revision: 355231
URL: https://svnweb.freebsd.org/changeset/base/355231

Log:
  lockprof: use IPI-injected fences to fix hangs on stat dump and reset
  
  The previously used quiesce_all_cpus walks all CPUs and waits until curthread
  can run on them. Even on contemporary machines this becomes a significant
  problem under load when it can literally take minutes for the operation to
  complete. With the patch the stall is normally less than 1 second.
  
  Reviewed by:  kib, jeff (previous version)
  Sponsored by: The FreeBSD Foundation
  Differential Revision:        https://reviews.freebsd.org/D21740

Modified:
  head/sys/kern/subr_lock.c

Modified: head/sys/kern/subr_lock.c
==============================================================================
--- head/sys/kern/subr_lock.c   Sat Nov 30 17:22:10 2019        (r355230)
+++ head/sys/kern/subr_lock.c   Sat Nov 30 17:24:42 2019        (r355231)
@@ -324,8 +324,14 @@ lock_prof_reset(void)
        atomic_store_rel_int(&lock_prof_resetting, 1);
        enabled = lock_prof_enable;
        lock_prof_enable = 0;
-       quiesce_all_cpus("profreset", 0);
        /*
+        * This both publishes lock_prof_enable as disabled and makes sure
+        * everyone else reads it if they are not far enough. We wait for the
+        * rest down below.
+        */
+       cpus_fence_seq_cst();
+       quiesce_all_critical();
+       /*
         * Some objects may have migrated between CPUs.  Clear all links
         * before we zero the structures.  Some items may still be linked
         * into per-thread lists as well.
@@ -343,6 +349,9 @@ lock_prof_reset(void)
                lock_prof_init_type(&lpc->lpc_types[0]);
                lock_prof_init_type(&lpc->lpc_types[1]);
        }
+       /*
+        * Paired with the fence from cpus_fence_seq_cst()
+        */
        atomic_store_rel_int(&lock_prof_resetting, 0);
        lock_prof_enable = enabled;
 }
@@ -433,12 +442,17 @@ dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
            "max", "wait_max", "total", "wait_total", "count", "avg", 
"wait_avg", "cnt_hold", "cnt_lock", "name");
        enabled = lock_prof_enable;
        lock_prof_enable = 0;
-       quiesce_all_cpus("profstat", 0);
+       /*
+        * See the comment in lock_prof_reset
+        */
+       cpus_fence_seq_cst();
+       quiesce_all_critical();
        t = ticks;
        CPU_FOREACH(cpu) {
                lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[0], sb, 0, t);
                lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[1], sb, 1, t);
        }
+       atomic_thread_fence_rel();
        lock_prof_enable = enabled;
 
        error = sbuf_finish(sb);
@@ -591,6 +605,10 @@ lock_profile_obtain_lock_success(struct lock_object *l
        else
                l->lpo_waittime = 0;
 out:
+       /*
+        * Paired with cpus_fence_seq_cst().
+        */
+       atomic_thread_fence_rel();
        critical_exit();
 }
 
@@ -677,6 +695,10 @@ release:
        type = &LP_CPU_SELF->lpc_types[spin];
        LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
 out:
+       /*
+        * Paired with cpus_fence_seq_cst().
+        */
+       atomic_thread_fence_rel();
        critical_exit();
 }
 
_______________________________________________
[email protected] mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "[email protected]"

Reply via email to