Author: avg
Date: Wed May 16 09:03:29 2012
New Revision: 235502
URL: http://svn.freebsd.org/changeset/base/235502

Log:
  MFC r228424,228448,230643: panic: add a switch and infrastructure for
  stopping other CPUs in SMP case
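  
  With the new kern.stop_scheduler_on_panic knob enabled (it defaults
  to off), panic() stops all other CPUs via stop_cpus_hard() and marks
  the panicking thread with td_stopsched.  The lock primitives and the
  sleep/switch paths then test SCHEDULER_STOPPED() and silently succeed
  instead of blocking, so the panicking thread can reach kdb or the
  dump code without deadlocking on locks held by the now-stopped CPUs.
  
  A minimal sketch of the pattern the hunks below apply (illustrative
  shape only, with hypothetical names, not a verbatim excerpt):
  
        void
        _example_lock(struct example_lock *el)
        {
                /* Post-panic: skip the lock and pretend it succeeded. */
                if (SCHEDULER_STOPPED())
                        return;
                /* ... normal lock acquisition ... */
        }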

Modified:
  stable/8/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
  stable/8/sys/dev/usb/usb_transfer.c
  stable/8/sys/geom/geom_bsd.c
  stable/8/sys/geom/geom_mbr.c
  stable/8/sys/geom/geom_pc98.c
  stable/8/sys/kern/kern_lock.c
  stable/8/sys/kern/kern_mutex.c
  stable/8/sys/kern/kern_rmlock.c
  stable/8/sys/kern/kern_rwlock.c
  stable/8/sys/kern/kern_shutdown.c
  stable/8/sys/kern/kern_sx.c
  stable/8/sys/kern/kern_synch.c
  stable/8/sys/kern/subr_kdb.c
  stable/8/sys/kern/subr_lock.c
  stable/8/sys/kern/subr_witness.c
  stable/8/sys/security/mac/mac_priv.c
  stable/8/sys/sys/mutex.h
  stable/8/sys/sys/proc.h
  stable/8/sys/sys/systm.h
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/boot/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/e1000/   (props changed)
  stable/8/sys/i386/conf/XENHVM   (props changed)

Modified: stable/8/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
==============================================================================
--- stable/8/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c    Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c    Wed May 16 09:03:29 2012        (r235502)
@@ -5877,6 +5877,9 @@ dtrace_probe(dtrace_id_t id, uintptr_t a
        volatile uint16_t *flags;
        hrtime_t now;
 
+       if (panicstr != NULL)
+               return;
+
 #if defined(sun)
        /*
         * Kick out immediately if this CPU is still being born (in which case

Modified: stable/8/sys/dev/usb/usb_transfer.c
==============================================================================
--- stable/8/sys/dev/usb/usb_transfer.c Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/dev/usb/usb_transfer.c Wed May 16 09:03:29 2012        (r235502)
@@ -42,6 +42,7 @@
 #include <sys/callout.h>
 #include <sys/malloc.h>
 #include <sys/priv.h>
+#include <sys/proc.h>
 
 #include <dev/usb/usb.h>
 #include <dev/usb/usbdi.h>

Modified: stable/8/sys/geom/geom_bsd.c
==============================================================================
--- stable/8/sys/geom/geom_bsd.c        Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/geom/geom_bsd.c        Wed May 16 09:03:29 2012        (r235502)
@@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/errno.h>
 #include <sys/disklabel.h>
 #include <sys/gpt.h>
+#include <sys/proc.h>
 #include <sys/uuid.h>
 #include <geom/geom.h>
 #include <geom/geom_slice.h>

Modified: stable/8/sys/geom/geom_mbr.c
==============================================================================
--- stable/8/sys/geom/geom_mbr.c        Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/geom/geom_mbr.c        Wed May 16 09:03:29 2012        (r235502)
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/md5.h>
+#include <sys/proc.h>
 
 #include <sys/diskmbr.h>
 #include <sys/sbuf.h>

Modified: stable/8/sys/geom/geom_pc98.c
==============================================================================
--- stable/8/sys/geom/geom_pc98.c       Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/geom/geom_pc98.c       Wed May 16 09:03:29 2012        (r235502)
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/bio.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
+#include <sys/proc.h>
 
 #include <sys/diskpc98.h>
 #include <geom/geom.h>

Modified: stable/8/sys/kern/kern_lock.c
==============================================================================
--- stable/8/sys/kern/kern_lock.c       Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/kern_lock.c       Wed May 16 09:03:29 2012        (r235502)
@@ -1207,6 +1207,9 @@ _lockmgr_disown(struct lock *lk, const c
 {
        uintptr_t tid, x;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        tid = (uintptr_t)curthread;
        _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 

Modified: stable/8/sys/kern/kern_mutex.c
==============================================================================
--- stable/8/sys/kern/kern_mutex.c      Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/kern_mutex.c      Wed May 16 09:03:29 2012        (r235502)
@@ -191,6 +191,8 @@ void
 _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
@@ -210,6 +212,9 @@ _mtx_lock_flags(struct mtx *m, int opts,
 void
 _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
 {
+
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
@@ -231,6 +236,8 @@ void
 _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -253,6 +260,8 @@ void
 _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -281,6 +290,9 @@ _mtx_trylock(struct mtx *m, int opts, co
 #endif
        int rval;
 
+       if (SCHEDULER_STOPPED())
+               return (1);
+
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
@@ -337,6 +349,9 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
        int64_t sleep_time = 0;
 #endif
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        if (mtx_owned(m)) {
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -507,6 +522,9 @@ _mtx_lock_spin(struct mtx *m, uintptr_t 
        uint64_t waittime = 0;
 #endif
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
@@ -554,6 +572,10 @@ _thread_lock_flags(struct thread *td, in
 
        i = 0;
        tid = (uintptr_t)curthread;
+
+       if (SCHEDULER_STOPPED())
+               return;
+
        for (;;) {
 retry:
                spinlock_enter();
@@ -655,6 +677,9 @@ _mtx_unlock_sleep(struct mtx *m, int opt
 {
        struct turnstile *ts;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);

Modified: stable/8/sys/kern/kern_rmlock.c
==============================================================================
--- stable/8/sys/kern/kern_rmlock.c     Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/kern_rmlock.c     Wed May 16 09:03:29 2012        (r235502)
@@ -315,6 +315,9 @@ _rm_rlock(struct rmlock *rm, struct rm_p
        struct thread *td = curthread;
        struct pcpu *pc;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        tracker->rmp_flags  = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;
@@ -383,6 +386,9 @@ _rm_runlock(struct rmlock *rm, struct rm
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
@@ -401,6 +407,9 @@ _rm_wlock(struct rmlock *rm)
        struct rm_priotracker *prio;
        struct turnstile *ts;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        mtx_lock(&rm->rm_lock);
 
        if (rm->rm_noreadtoken == 0) {
@@ -447,6 +456,9 @@ _rm_wunlock(struct rmlock *rm)
 void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);
 
@@ -464,6 +476,9 @@ void
 _rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
@@ -475,6 +490,9 @@ _rm_rlock_debug(struct rmlock *rm, struc
     const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);
 
        _rm_rlock(rm, tracker);
@@ -491,6 +509,9 @@ _rm_runlock_debug(struct rmlock *rm, str
     const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);

Modified: stable/8/sys/kern/kern_rwlock.c
==============================================================================
--- stable/8/sys/kern/kern_rwlock.c     Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/kern_rwlock.c     Wed May 16 09:03:29 2012        (r235502)
@@ -229,6 +229,8 @@ void
 _rw_wlock(struct rwlock *rw, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
@@ -245,6 +247,9 @@ _rw_try_wlock(struct rwlock *rw, const c
 {
        int rval;
 
+       if (SCHEDULER_STOPPED())
+               return (1);
+
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
 
@@ -269,6 +274,8 @@ void
 _rw_wunlock(struct rwlock *rw, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
@@ -313,6 +320,9 @@ _rw_rlock(struct rwlock *rw, const char 
        int64_t sleep_time = 0;
 #endif
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
        KASSERT(rw_wowner(rw) != curthread,
@@ -495,6 +505,9 @@ _rw_try_rlock(struct rwlock *rw, const c
 {
        uintptr_t x;
 
+       if (SCHEDULER_STOPPED())
+               return (1);
+
        for (;;) {
                x = rw->rw_lock;
                KASSERT(rw->rw_lock != RW_DESTROYED,
@@ -521,6 +534,9 @@ _rw_runlock(struct rwlock *rw, const cha
        struct turnstile *ts;
        uintptr_t x, v, queue;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);
@@ -646,6 +662,9 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
        int64_t sleep_time = 0;
 #endif
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        if (rw_wlocked(rw)) {
                KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
                    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
@@ -810,6 +829,9 @@ _rw_wunlock_hard(struct rwlock *rw, uint
        uintptr_t v;
        int queue;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        if (rw_wlocked(rw) && rw_recursed(rw)) {
                rw->rw_recurse--;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -872,6 +894,9 @@ _rw_try_upgrade(struct rwlock *rw, const
        struct turnstile *ts;
        int success;
 
+       if (SCHEDULER_STOPPED())
+               return (1);
+
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);
@@ -942,6 +967,9 @@ _rw_downgrade(struct rwlock *rw, const c
        uintptr_t tid, v;
        int rwait, wwait;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);

Modified: stable/8/sys/kern/kern_shutdown.c
==============================================================================
--- stable/8/sys/kern/kern_shutdown.c   Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/kern_shutdown.c   Wed May 16 09:03:29 2012        (r235502)
@@ -121,6 +121,11 @@ SYSCTL_INT(_kern, OID_AUTO, sync_on_pani
        &sync_on_panic, 0, "Do a sync before rebooting from a panic");
 TUNABLE_INT("kern.sync_on_panic", &sync_on_panic);
 
+static int stop_scheduler_on_panic = 0;
+SYSCTL_INT(_kern, OID_AUTO, stop_scheduler_on_panic, CTLFLAG_RW | CTLFLAG_TUN,
+    &stop_scheduler_on_panic, 0, "stop scheduler upon entering panic");
+TUNABLE_INT("kern.stop_scheduler_on_panic", &stop_scheduler_on_panic);
+
 SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");
 
 #ifndef DIAGNOSTIC
@@ -292,10 +297,12 @@ boot(int howto)
         * systems don't shutdown properly (i.e., ACPI power off) if we
         * run on another processor.
         */
-       thread_lock(curthread);
-       sched_bind(curthread, 0);
-       thread_unlock(curthread);
-       KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+       if (!SCHEDULER_STOPPED()) {
+               thread_lock(curthread);
+               sched_bind(curthread, 0);
+               thread_unlock(curthread);
+               KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+       }
 #endif
        /* We're in the process of rebooting. */
        rebooting = 1;
@@ -551,7 +558,11 @@ panic(const char *fmt, ...)
        va_list ap;
        static char buf[256];
 
-       critical_enter();
+       if (stop_scheduler_on_panic)
+               spinlock_enter();
+       else
+               critical_enter();
+
 #ifdef SMP
        /*
         * We don't want multiple CPU's to panic at the same time, so we
@@ -564,6 +575,19 @@ panic(const char *fmt, ...)
                    PCPU_GET(cpuid)) == 0)
                        while (panic_cpu != NOCPU)
                                ; /* nothing */
+
+       if (stop_scheduler_on_panic) {
+               if (panicstr == NULL && !kdb_active)
+                       stop_cpus_hard(PCPU_GET(other_cpus));
+
+               /*
+                * We set stop_scheduler here and not in the block above,
+                * because we want to ensure that if panic has been called and
+                * stop_scheduler_on_panic is true, then stop_scheduler will
+                * always be set, even if panic has been entered from kdb.
+                */
+               td->td_stopsched = 1;
+       }
 #endif
 
        bootopt = RB_AUTOBOOT | RB_DUMP;
@@ -610,7 +634,8 @@ panic(const char *fmt, ...)
        /* thread_unlock(td); */
        if (!sync_on_panic)
                bootopt |= RB_NOSYNC;
-       critical_exit();
+       if (!stop_scheduler_on_panic)
+               critical_exit();
        boot(bootopt);
 }
 

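The new behavior is opt-in: stop_scheduler_on_panic defaults to 0 and,
per the CTLFLAG_RW | CTLFLAG_TUN flags above, can be set as a loader
tunable or changed at runtime via sysctl.  Condensed, the merged entry
sequence of panic() reads as follows (a sketch of the hunks above, not
the verbatim merged code):

        if (stop_scheduler_on_panic)
                spinlock_enter();       /* also disables interrupts */
        else
                critical_enter();
        /* ... one-CPU-at-a-time panic interlock elided ... */
        if (stop_scheduler_on_panic) {
                if (panicstr == NULL && !kdb_active)
                        stop_cpus_hard(PCPU_GET(other_cpus));
                td->td_stopsched = 1;   /* SCHEDULER_STOPPED() now true */
        }
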
Modified: stable/8/sys/kern/kern_sx.c
==============================================================================
--- stable/8/sys/kern/kern_sx.c Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/kern_sx.c Wed May 16 09:03:29 2012        (r235502)
@@ -238,6 +238,8 @@ _sx_slock(struct sx *sx, int opts, const
 {
        int error = 0;
 
+       if (SCHEDULER_STOPPED())
+               return (0);
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
@@ -257,6 +259,9 @@ _sx_try_slock(struct sx *sx, const char 
 {
        uintptr_t x;
 
+       if (SCHEDULER_STOPPED())
+               return (1);
+
        for (;;) {
                x = sx->sx_lock;
                KASSERT(x != SX_LOCK_DESTROYED,
@@ -280,6 +285,8 @@ _sx_xlock(struct sx *sx, int opts, const
 {
        int error = 0;
 
+       if (SCHEDULER_STOPPED())
+               return (0);
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
@@ -301,6 +308,9 @@ _sx_try_xlock(struct sx *sx, const char 
 {
        int rval;
 
+       if (SCHEDULER_STOPPED())
+               return (1);
+
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
@@ -327,6 +337,8 @@ void
 _sx_sunlock(struct sx *sx, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
@@ -342,6 +354,8 @@ void
 _sx_xunlock(struct sx *sx, const char *file, int line)
 {
 
+       if (SCHEDULER_STOPPED())
+               return;
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
@@ -366,6 +380,9 @@ _sx_try_upgrade(struct sx *sx, const cha
        uintptr_t x;
        int success;
 
+       if (SCHEDULER_STOPPED())
+               return (1);
+
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);
@@ -396,6 +413,9 @@ _sx_downgrade(struct sx *sx, const char 
        uintptr_t x;
        int wakeup_swapper;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
@@ -478,6 +498,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
        int64_t sleep_time = 0;
 #endif
 
+       if (SCHEDULER_STOPPED())
+               return (0);
+
        /* If we already hold an exclusive lock, then recurse. */
        if (sx_xlocked(sx)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -678,6 +701,9 @@ _sx_xunlock_hard(struct sx *sx, uintptr_
        uintptr_t x;
        int queue, wakeup_swapper;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
 
        /* If the lock is recursed, then unrecurse one level. */
@@ -750,6 +776,9 @@ _sx_slock_hard(struct sx *sx, int opts, 
        int64_t sleep_time = 0;
 #endif
 
+       if (SCHEDULER_STOPPED())
+               return (0);
+
        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
@@ -916,6 +945,9 @@ _sx_sunlock_hard(struct sx *sx, const ch
        uintptr_t x;
        int wakeup_swapper;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        for (;;) {
                x = sx->sx_lock;
 

Modified: stable/8/sys/kern/kern_synch.c
==============================================================================
--- stable/8/sys/kern/kern_synch.c      Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/kern_synch.c      Wed May 16 09:03:29 2012        (r235502)
@@ -158,7 +158,7 @@ _sleep(void *ident, struct lock_object *
        else
                class = NULL;
 
-       if (cold) {
+       if (cold || SCHEDULER_STOPPED()) {
                /*
                 * During autoconfiguration, just return;
                 * don't run any other threads or panic below,
@@ -260,7 +260,7 @@ msleep_spin(void *ident, struct mtx *mtx
        KASSERT(p != NULL, ("msleep1"));
        KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
 
-       if (cold) {
+       if (cold || SCHEDULER_STOPPED()) {
                /*
                 * During autoconfiguration, just return;
                 * don't run any other threads or panic below,
@@ -411,6 +411,8 @@ mi_switch(int flags, struct thread *newt
         */
        if (kdb_active)
                kdb_switch();
+       if (SCHEDULER_STOPPED())
+               return;
        if (flags & SW_VOL)
                td->td_ru.ru_nvcsw++;
        else

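With the scheduler stopped, the sleep entry points behave as they do
during early boot (the existing "cold" escape): they return right away
instead of queueing the thread, and mi_switch() declines to context
switch.  A hypothetical post-panic caller simply gets an immediate
success back:

        /* Sketch: after panic, returns 0 at once instead of sleeping. */
        error = tsleep(sc, PRIBIO, "examp", hz);  /* sc: hypothetical */
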
Modified: stable/8/sys/kern/subr_kdb.c
==============================================================================
--- stable/8/sys/kern/subr_kdb.c        Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/subr_kdb.c        Wed May 16 09:03:29 2012        (r235502)
@@ -250,10 +250,7 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS
 void
 kdb_panic(const char *msg)
 {
-       
-#ifdef SMP
-       stop_cpus_hard(PCPU_GET(other_cpus));
-#endif
+
        printf("KDB: panic\n");
        panic("%s", msg);
 }
@@ -611,8 +608,11 @@ kdb_trap(int type, int code, struct trap
        intr = intr_disable();
 
 #ifdef SMP
-       if ((did_stop_cpus = kdb_stop_cpus) != 0)
-               stop_cpus_hard(PCPU_GET(other_cpus));
+       if (!SCHEDULER_STOPPED()) {
+               if ((did_stop_cpus = kdb_stop_cpus) != 0)
+                       stop_cpus_hard(PCPU_GET(other_cpus));
+       } else
+               did_stop_cpus = 0;
 #endif
 
        kdb_active++;

Modified: stable/8/sys/kern/subr_lock.c
==============================================================================
--- stable/8/sys/kern/subr_lock.c       Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/subr_lock.c       Wed May 16 09:03:29 2012        (r235502)
@@ -537,6 +537,9 @@ lock_profile_obtain_lock_success(struct 
        struct lock_profile_object *l;
        int spin;
 
+       if (SCHEDULER_STOPPED())
+               return;
+
        /* don't reset the timer when/if recursing */
        if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
                return;
@@ -601,6 +604,8 @@ lock_profile_release_lock(struct lock_ob
        struct lpohead *head;
        int spin;
 
+       if (SCHEDULER_STOPPED())
+               return;
        if (lo->lo_flags & LO_NOPROFILE)
                return;
        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;

Modified: stable/8/sys/kern/subr_witness.c
==============================================================================
--- stable/8/sys/kern/subr_witness.c    Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/kern/subr_witness.c    Wed May 16 09:03:29 2012        (r235502)
@@ -2144,6 +2144,13 @@ witness_save(struct lock_object *lock, c
        struct lock_instance *instance;
        struct lock_class *class;
 
+       /*
+        * This function is used independently in locking code to deal with
+        * Giant; the SCHEDULER_STOPPED() check can be removed here after
+        * Giant is gone.
+        */
+       if (SCHEDULER_STOPPED())
+               return;
        KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
                return;
@@ -2170,6 +2177,13 @@ witness_restore(struct lock_object *lock
        struct lock_instance *instance;
        struct lock_class *class;
 
+       /*
+        * This function is used independently in locking code to deal with
+        * Giant; the SCHEDULER_STOPPED() check can be removed here after
+        * Giant is gone.
+        */
+       if (SCHEDULER_STOPPED())
+               return;
        KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
                return;

Modified: stable/8/sys/security/mac/mac_priv.c
==============================================================================
--- stable/8/sys/security/mac/mac_priv.c        Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/security/mac/mac_priv.c        Wed May 16 09:03:29 2012        (r235502)
@@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$");
 #include "opt_mac.h"
 
 #include <sys/param.h>
-#include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/priv.h>
 #include <sys/sdt.h>

Modified: stable/8/sys/sys/mutex.h
==============================================================================
--- stable/8/sys/sys/mutex.h    Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/sys/mutex.h    Wed May 16 09:03:29 2012        (r235502)
@@ -380,7 +380,8 @@ do {                                                        \
                                                                        \
        if (mtx_owned(&Giant)) {                                        \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
-               for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)     \
+               for (_giantcnt = 0; mtx_owned(&Giant) &&                \
+                   !SCHEDULER_STOPPED(); _giantcnt++)                  \
                        mtx_unlock(&Giant);                             \
        }
 

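The hunk above lands in the Giant-dropping macro (DROP_GIANT() in
sys/mutex.h).  The added condition prevents an infinite loop: once
td_stopsched is set, mtx_unlock() returns early (see the kern_mutex.c
hunks above) without actually releasing Giant, so mtx_owned(&Giant)
would stay true forever under the old loop condition:

        /* Failure mode the new check avoids, post-panic: */
        for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)
                mtx_unlock(&Giant);     /* now a no-op; never terminates */
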
Modified: stable/8/sys/sys/proc.h
==============================================================================
--- stable/8/sys/sys/proc.h     Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/sys/proc.h     Wed May 16 09:03:29 2012        (r235502)
@@ -229,6 +229,7 @@ struct thread {
        short           td_locks;       /* (k) Count of non-spin locks. */
        short           td_rw_rlocks;   /* (k) Count of rwlock read locks. */
        short           td_lk_slocks;   /* (k) Count of lockmgr shared locks. */
+       short           td_stopsched;   /* (k) Scheduler stopped. */
        struct turnstile *td_blocked;   /* (t) Lock thread is blocked on. */
        const char      *td_lockname;   /* (t) Name of lock blocked on. */
        LIST_HEAD(, turnstile) td_contested;    /* (q) Contested locks. */

Modified: stable/8/sys/sys/systm.h
==============================================================================
--- stable/8/sys/sys/systm.h    Wed May 16 07:18:56 2012        (r235501)
+++ stable/8/sys/sys/systm.h    Wed May 16 09:03:29 2012        (r235502)
@@ -111,6 +111,14 @@ enum VM_GUEST { VM_GUEST_NO = 0, VM_GUES
            ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
 
 /*
+ * If we have already panic'd and this is the thread that called
+ * panic(), then don't block on any mutexes but silently succeed.
+ * Otherwise, the kernel will deadlock since the scheduler isn't
+ * going to run the thread that holds any lock we need.
+ */
+#define        SCHEDULER_STOPPED() __predict_false(curthread->td_stopsched)
+
+/*
  * XXX the hints declarations are even more misplaced than most declarations
  * in this file, since they are needed in one file (per arch) and only used
  * in two files.
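
A minimal, hypothetical consumer of the new macro (the names here are
illustrative; the real callers are the lock and sleep paths patched
above):

        #include <sys/param.h>
        #include <sys/systm.h>
        #include <sys/proc.h>

        static void
        example_op(void)
        {
                /*
                 * Once panic() sets td_stopsched on the panicking
                 * thread, skip the real work and report success;
                 * __predict_false() keeps the test cheap on the
                 * common (no-panic) path.
                 */
                if (SCHEDULER_STOPPED())
                        return;
                /* ... normal locking or sleeping work ... */
        }
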
_______________________________________________
[email protected] mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "[email protected]"
