On Fri, May 08, 2015 at 20:28 +0200, Mark Kettenis wrote:
> > Date: Fri, 8 May 2015 20:15:58 +0200
> > From: Mike Belopuhov <[email protected]>
> >
> > On Fri, May 08, 2015 at 12:34 +0200, Mike Belopuhov wrote:
> > > > I think tsleep(9) and msleep(9) need to release and re-acquire the
> > > > kernel lock in the "cold || panicstr" case.
> > >
> > > Well, it's not hard to do really, but...
> > >
> > > > We might need this for
> > > > handling interrupts during autoconf as soon as we start distributing
> > > > interrupts over CPUs.
> > > >
> > >
> > > ...cold might mean that interrupts are not ready yet. So then we might
> > > need another flag for shutdown?
> >
> > This is what I have come up with. Chunks were taken directly from
> > mi_switch and it seems to do the job just fine. Right now I'm not
> > using any additional flags and it seems to work here. I'll resume
> > testing on Monday, but it looks fairly complete. Any comments?
>
> Makes sense to me. The msleep/tsleep code could be simplified to:
>
> 	if (__mp_lock_held(&kernel_lock)) {
> 		hold_count = __mp_release_all(&kernel_lock);
> 		__mp_acquire_count(&kernel_lock, hold_count);
> 	}
>
Indeed.
> I'm also wondering whether we should change __mp_release_all() to
> simple return 0 if the current CPU does not hold the lock, such that
> the __mp_lock_held() check isn't needed anymore. But that's a
> separate issue.
>
It would also make __mp_release_all() safer, since it wouldn't rely on
the caller performing an external __mp_lock_held() check.
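For reference, a minimal sketch of what that variant might look like.
This is not the committed implementation: the mpl_cpu/mpl_count field
names and the assumption that __mp_acquire_count(mpl, 0) is a no-op are
guesses based on the traditional __mp_lock layout.

#ifdef MULTIPROCESSOR
int
__mp_release_all(struct __mp_lock *mpl)
{
	int rv;

	/*
	 * Hypothetical early exit: if this CPU does not own the
	 * lock, report a hold count of zero, which also makes the
	 * matching __mp_acquire_count(mpl, 0) a no-op.
	 */
	if (mpl->mpl_cpu != curcpu())
		return (0);

	rv = mpl->mpl_count;		/* assumed recursion counter */
	mpl->mpl_count = 0;
	mpl->mpl_cpu = NULL;		/* let the next spinner take it */
	return (rv);
}
#endif

With that change the callers below could run the release/acquire pair
unconditionally instead of guarding it with __mp_lock_held().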
I don't have any further input on this; the diff works fine here.
OK?
diff --git sys/kern/kern_synch.c sys/kern/kern_synch.c
index 03308b4..6f573fc 100644
--- sys/kern/kern_synch.c
+++ sys/kern/kern_synch.c
@@ -103,10 +103,13 @@ extern int safepri;
 int
 tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
 {
 	struct sleep_state sls;
 	int error, error1;
+#ifdef MULTIPROCESSOR
+	int hold_count;
+#endif
 
 	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
 
 #ifdef MULTIPROCESSOR
 	KASSERT(timo || __mp_lock_held(&kernel_lock));
@@ -120,10 +123,16 @@ tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
 		 * don't run any other procs or panic below,
 		 * in case this is the idle process and already asleep.
 		 */
 		s = splhigh();
 		splx(safepri);
+#ifdef MULTIPROCESSOR
+		if (__mp_lock_held(&kernel_lock)) {
+			hold_count = __mp_release_all(&kernel_lock);
+			__mp_acquire_count(&kernel_lock, hold_count);
+		}
+#endif
 		splx(s);
 		return (0);
 	}
 
 	sleep_setup(&sls, ident, priority, wmesg);
@@ -149,10 +158,13 @@ int
 msleep(const volatile void *ident, struct mutex *mtx, int priority,
     const char *wmesg, int timo)
 {
 	struct sleep_state sls;
 	int error, error1, spl;
+#ifdef MULTIPROCESSOR
+	int hold_count;
+#endif
 
 	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
 	KASSERT(mtx != NULL);
 
 	if (cold || panicstr) {
@@ -163,10 +175,16 @@ msleep(const volatile void *ident, struct mutex *mtx, int priority,
 		 * in case this is the idle process and already asleep.
 		 */
 		spl = MUTEX_OLDIPL(mtx);
 		MUTEX_OLDIPL(mtx) = safepri;
 		mtx_leave(mtx);
+#ifdef MULTIPROCESSOR
+		if (__mp_lock_held(&kernel_lock)) {
+			hold_count = __mp_release_all(&kernel_lock);
+			__mp_acquire_count(&kernel_lock, hold_count);
+		}
+#endif
 		if ((priority & PNORELOCK) == 0) {
 			mtx_enter(mtx);
 			MUTEX_OLDIPL(mtx) = spl;
 		} else
 			splx(spl);
diff --git sys/kern/vfs_subr.c sys/kern/vfs_subr.c
index a26fbe2..a373789 100644
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -1664,10 +1664,13 @@ int
 vfs_syncwait(int verbose)
 {
 	struct buf *bp;
 	int iter, nbusy, dcount, s;
 	struct proc *p;
+#ifdef MULTIPROCESSOR
+	int hold_count;
+#endif
 
 	p = curproc? curproc : &proc0;
 	sys_sync(p, (void *)0, (register_t *)0);
 
 	/* Wait for sync to finish. */
@@ -1698,11 +1701,21 @@ vfs_syncwait(int verbose)
 			}
 		}
 		if (nbusy == 0)
 			break;
 		if (verbose)
 			printf("%d ", nbusy);
+#ifdef MULTIPROCESSOR
+		if (__mp_lock_held(&kernel_lock))
+			hold_count = __mp_release_all(&kernel_lock);
+		else
+			hold_count = 0;
+#endif
 		DELAY(40000 * iter);
+#ifdef MULTIPROCESSOR
+		if (hold_count)
+			__mp_acquire_count(&kernel_lock, hold_count);
+#endif
 	}
 
 	return nbusy;
 }
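
For clarity, here is the tsleep() cold path from the diff again with
the intent spelled out; the comments are mine and not part of the
patch:

	s = splhigh();
	splx(safepri);		/* give pending interrupts a chance to run */
#ifdef MULTIPROCESSOR
	if (__mp_lock_held(&kernel_lock)) {
		/*
		 * Temporarily drop all recursive acquisitions of the
		 * kernel lock so that another CPU spinning on it, e.g.
		 * one trying to handle an interrupt during autoconf or
		 * after a panic, can grab it; then re-acquire it the
		 * same number of times.  This is the same pattern
		 * mi_switch() uses.
		 */
		hold_count = __mp_release_all(&kernel_lock);
		__mp_acquire_count(&kernel_lock, hold_count);
	}
#endif
	splx(s);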