P_BIGLOCK is only used to figure out whether the process holds the biglock.
The problem with this is that the first entry point into the kernel from a
sleepable context needs to call KERNEL_PROC_LOCK, while recursive (or
non-process) entry points need to call KERNEL_LOCK. Pedro showed at least
one entry point where we got it wrong, and there might be others.
Instead of playing games with the flag in mi_switch, just check whether we
are the current biglock holder. This makes KERNEL_PROC_LOCK and KERNEL_LOCK
more or less equivalent.
Cleanup will come after.
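
For reference, a rough sketch of what the proc entry points reduce to once
the flag handling is gone (reconstructed from the kern_lock.c hunk below,
so treat it as illustration rather than the exact file contents):

void
_kernel_proc_lock(struct proc *p)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
	/* no P_BIGLOCK bookkeeping; the mplock itself knows who holds it */
}

void
_kernel_proc_unlock(struct proc *p)
{
	__mp_unlock(&kernel_lock);
}

Assuming _kernel_lock()/_kernel_unlock() already do the same thing minus
the proc argument, it no longer matters which of the two macros a given
entry point uses, and mi_switch can simply ask __mp_lock_held(&kernel_lock)
whether there is anything to drop across the context switch.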
//art
Index: kern/kern_lock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_lock.c,v
retrieving revision 1.35
diff -u -r1.35 kern_lock.c
--- kern/kern_lock.c 26 Apr 2010 05:48:17 -0000 1.35
+++ kern/kern_lock.c 5 Jul 2011 19:34:47 -0000
@@ -378,13 +378,11 @@
{
SCHED_ASSERT_UNLOCKED();
__mp_lock(&kernel_lock);
- atomic_setbits_int(&p->p_flag, P_BIGLOCK);
}
void
_kernel_proc_unlock(struct proc *p)
{
- atomic_clearbits_int(&p->p_flag, P_BIGLOCK);
__mp_unlock(&kernel_lock);
}
Index: kern/sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.25
diff -u -r1.25 sched_bsd.c
--- kern/sched_bsd.c 7 Mar 2011 07:07:13 -0000 1.25
+++ kern/sched_bsd.c 5 Jul 2011 19:34:47 -0000
@@ -366,8 +366,10 @@
* Release the kernel_lock, as we are about to yield the CPU.
*/
sched_count = __mp_release_all_but_one(&sched_lock);
- if (p->p_flag & P_BIGLOCK)
+ if (__mp_lock_held(&kernel_lock))
hold_count = __mp_release_all(&kernel_lock);
+ else
+ hold_count = 0;
#endif
/*
@@ -448,7 +450,7 @@
* released the scheduler lock to avoid deadlock, and before
* we reacquire the interlock and the scheduler lock.
*/
- if (p->p_flag & P_BIGLOCK)
+ if (hold_count)
__mp_acquire_count(&kernel_lock, hold_count);
__mp_acquire_count(&sched_lock, sched_count + 1);
#endif