Module Name:	src
Committed By:	rmind
Date:		Sat Dec 18 01:36:20 UTC 2010
Modified Files:
	src/sys/kern: kern_exec.c kern_exit.c kern_lwp.c kern_timeout.c
	    sys_select.c
	src/sys/rump/librump/rumpkern: sleepq.c
	src/sys/sys: lwp.h sleepq.h

Log Message:
- Fix a few possible locking issues in execve1() and exit1().  Add a note
  that scheduler locks are special in this regard - adaptive locks cannot
  be in the path due to turnstiles.  Randomly spotted/reported by uebay...@.
- Remove unused lwp_relock() and replace lwp_lock_retry() by simplifying
  lwp_lock() and sleepq_enter() a little.
- Give alllwp its own cache-line and mark lwp_cache pointer as read-mostly.

OK ad@

To generate a diff of this commit:
cvs rdiff -u -r1.303 -r1.304 src/sys/kern/kern_exec.c
cvs rdiff -u -r1.230 -r1.231 src/sys/kern/kern_exit.c
cvs rdiff -u -r1.151 -r1.152 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.44 -r1.45 src/sys/kern/kern_timeout.c
cvs rdiff -u -r1.28 -r1.29 src/sys/kern/sys_select.c
cvs rdiff -u -r1.8 -r1.9 src/sys/rump/librump/rumpkern/sleepq.c
cvs rdiff -u -r1.138 -r1.139 src/sys/sys/lwp.h
cvs rdiff -u -r1.18 -r1.19 src/sys/sys/sleepq.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
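For readers skimming the diffs below, the execve1()/exit1() fix reduces to a
single re-ordering.  The LWP's lock (l_mutex) is a spin lock, possibly a
scheduler or sleep queue lock, and, per the log message above, releasing an
adaptive mutex such as p->p_lock or proc_lock may itself have to go through
turnstiles, which must not happen while such a spin lock is held.  A condensed
sketch of the corrected ordering, pulled from the kern_exec.c hunk below
(surrounding code and error handling omitted):

	lwp_lock(l);
	l->l_stat = LSSTOP;
	p->p_stat = SSTOP;
	p->p_nrlwps--;
	lwp_unlock(l);		/* drop the LWP spin lock first ... */
	mutex_exit(p->p_lock);	/* ... only then release the adaptive locks */
	mutex_exit(proc_lock);
	lwp_lock(l);		/* re-acquire; mi_switch() wants the LWP locked */
	mi_switch(l);

The same pattern is applied in exit1(), where only p->p_lock is held.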
Modified files:

Index: src/sys/kern/kern_exec.c
diff -u src/sys/kern/kern_exec.c:1.303 src/sys/kern/kern_exec.c:1.304
--- src/sys/kern/kern_exec.c:1.303	Tue Nov 30 10:43:05 2010
+++ src/sys/kern/kern_exec.c	Sat Dec 18 01:36:19 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_exec.c,v 1.303 2010/11/30 10:43:05 dholland Exp $	*/
+/*	$NetBSD: kern_exec.c,v 1.304 2010/12/18 01:36:19 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -59,7 +59,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.303 2010/11/30 10:43:05 dholland Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.304 2010/12/18 01:36:19 rmind Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_modular.h"
@@ -1165,8 +1165,10 @@
 		l->l_stat = LSSTOP;
 		p->p_stat = SSTOP;
 		p->p_nrlwps--;
+		lwp_unlock(l);
 		mutex_exit(p->p_lock);
 		mutex_exit(proc_lock);
+		lwp_lock(l);
 		mi_switch(l);
 		ksiginfo_queue_drain(&kq);
 		KERNEL_LOCK(l->l_biglocks, l);

Index: src/sys/kern/kern_exit.c
diff -u src/sys/kern/kern_exit.c:1.230 src/sys/kern/kern_exit.c:1.231
--- src/sys/kern/kern_exit.c:1.230	Wed Jul  7 01:30:37 2010
+++ src/sys/kern/kern_exit.c	Sat Dec 18 01:36:19 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_exit.c,v 1.230 2010/07/07 01:30:37 chs Exp $	*/
+/*	$NetBSD: kern_exit.c,v 1.231 2010/12/18 01:36:19 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.230 2010/07/07 01:30:37 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.231 2010/12/18 01:36:19 rmind Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_perfctrs.h"
@@ -244,7 +244,9 @@
 		lwp_lock(l);
 		p->p_nrlwps--;
 		l->l_stat = LSSTOP;
+		lwp_unlock(l);
 		mutex_exit(p->p_lock);
+		lwp_lock(l);
 		mi_switch(l);
 		KERNEL_LOCK(l->l_biglocks, l);
 		mutex_enter(p->p_lock);

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.151 src/sys/kern/kern_lwp.c:1.152
--- src/sys/kern/kern_lwp.c:1.151	Wed Jul  7 01:30:37 2010
+++ src/sys/kern/kern_lwp.c	Sat Dec 18 01:36:19 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.152 2010/12/18 01:36:19 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -153,9 +153,11 @@
  *	each field are documented in sys/lwp.h.
  *
  *	State transitions must be made with the LWP's general lock held,
- *	and may cause the LWP's lock pointer to change. Manipulation of
+ *	and may cause the LWP's lock pointer to change.  Manipulation of
  *	the general lock is not performed directly, but through calls to
- *	lwp_lock(), lwp_relock() and similar.
+ *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
+ *	adaptive locks are not allowed to be released while the LWP's lock
+ *	is being held (unlike for other spin-locks).
 *
 *	States and their associated locks:
 *
@@ -209,7 +211,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.152 2010/12/18 01:36:19 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -242,8 +244,8 @@
 #include <uvm/uvm_extern.h>
 #include <uvm/uvm_object.h>
 
-struct lwplist alllwp = LIST_HEAD_INITIALIZER(alllwp);
-static pool_cache_t lwp_cache;
+static pool_cache_t lwp_cache __read_mostly;
+struct lwplist alllwp __cacheline_aligned;
 
 /* DTrace proc provider probes */
 SDT_PROBE_DEFINE(proc,,,lwp_create,
@@ -284,6 +286,7 @@
 lwpinit(void)
 {
 
+	LIST_INIT(&alllwp);
 	lwpinit_specificdata();
 	lwp_sys_init();
 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
@@ -1242,40 +1245,6 @@
 }
 
 /*
- * Lock an LWP.
- */
-kmutex_t *
-lwp_lock_retry(struct lwp *l, kmutex_t *old)
-{
-
-	/*
-	 * XXXgcc ignoring kmutex_t * volatile on i386
-	 *
-	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
-	 */
-#if 1
-	while (l->l_mutex != old) {
-#else
-	for (;;) {
-#endif
-		mutex_spin_exit(old);
-		old = l->l_mutex;
-		mutex_spin_enter(old);
-
-		/*
-		 * mutex_enter() will have posted a read barrier.  Re-test
-		 * l->l_mutex.  If it has changed, we need to try again.
-		 */
-#if 1
-	}
-#else
-	} while (__predict_false(l->l_mutex != old));
-#endif
-
-	return old;
-}
-
-/*
  * Lend a new mutex to an LWP.  The old mutex must be held.
  */
 void
@@ -1297,7 +1266,7 @@
 {
 	kmutex_t *old;
 
-	KASSERT(mutex_owned(l->l_mutex));
+	KASSERT(lwp_locked(l, NULL));
 
 	old = l->l_mutex;
 	membar_exit();
@@ -1305,25 +1274,6 @@
 	mutex_spin_exit(old);
 }
 
-/*
- * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
- * locked.
- */
-void
-lwp_relock(struct lwp *l, kmutex_t *new)
-{
-	kmutex_t *old;
-
-	KASSERT(mutex_owned(l->l_mutex));
-
-	old = l->l_mutex;
-	if (old != new) {
-		mutex_spin_enter(new);
-		l->l_mutex = new;
-		mutex_spin_exit(old);
-	}
-}
-
 int
 lwp_trylock(struct lwp *l)
 {
@@ -1346,7 +1296,6 @@
 		(*l->l_syncobj->sobj_unsleep)(l, cleanup);
 }
 
-
 /*
  * Handle exceptions for mi_userret().  Called if a member of LW_USERRET is
  * set.

Index: src/sys/kern/kern_timeout.c
diff -u src/sys/kern/kern_timeout.c:1.44 src/sys/kern/kern_timeout.c:1.45
--- src/sys/kern/kern_timeout.c:1.44	Sat Mar 21 13:11:14 2009
+++ src/sys/kern/kern_timeout.c	Sat Dec 18 01:36:19 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_timeout.c,v 1.44 2009/03/21 13:11:14 ad Exp $	*/
+/*	$NetBSD: kern_timeout.c,v 1.45 2010/12/18 01:36:19 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2003, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -59,7 +59,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.44 2009/03/21 13:11:14 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.45 2010/12/18 01:36:19 rmind Exp $");
 
 /*
  * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
@@ -87,6 +87,7 @@
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/callout.h>
+#include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/sleepq.h>

Index: src/sys/kern/sys_select.c
diff -u src/sys/kern/sys_select.c:1.28 src/sys/kern/sys_select.c:1.29
--- src/sys/kern/sys_select.c:1.28	Fri Oct 15 05:39:19 2010
+++ src/sys/kern/sys_select.c	Sat Dec 18 01:36:19 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_select.c,v 1.28 2010/10/15 05:39:19 rmind Exp $	*/
+/*	$NetBSD: sys_select.c,v 1.29 2010/12/18 01:36:19 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
@@ -84,21 +84,19 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.28 2010/10/15 05:39:19 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.29 2010/12/18 01:36:19 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/filedesc.h>
-#include <sys/ioctl.h>
 #include <sys/file.h>
 #include <sys/proc.h>
 #include <sys/socketvar.h>
 #include <sys/signalvar.h>
 #include <sys/uio.h>
 #include <sys/kernel.h>
-#include <sys/stat.h>
+#include <sys/lwp.h>
 #include <sys/poll.h>
-#include <sys/vnode.h>
 #include <sys/mount.h>
 #include <sys/syscallargs.h>
 #include <sys/cpu.h>

Index: src/sys/rump/librump/rumpkern/sleepq.c
diff -u src/sys/rump/librump/rumpkern/sleepq.c:1.8 src/sys/rump/librump/rumpkern/sleepq.c:1.9
--- src/sys/rump/librump/rumpkern/sleepq.c:1.8	Fri Jul 23 19:14:14 2010
+++ src/sys/rump/librump/rumpkern/sleepq.c	Sat Dec 18 01:36:20 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: sleepq.c,v 1.8 2010/07/23 19:14:14 pooka Exp $	*/
+/*	$NetBSD: sleepq.c,v 1.9 2010/12/18 01:36:20 rmind Exp $	*/
 
 /*
  * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.8 2010/07/23 19:14:14 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.9 2010/12/18 01:36:20 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/condvar.h>
@@ -161,18 +161,15 @@
 	return NULL;
 }
 
-/*
- * XXX: used only by callout, therefore here.  should try to use
- * one in kern_lwp directly.
- */
-kmutex_t *
-lwp_lock_retry(struct lwp *l, kmutex_t *old)
+void
+lwp_unlock_to(struct lwp *l, kmutex_t *new)
 {
+	kmutex_t *old;
 
-	while (l->l_mutex != old) {
-		mutex_spin_exit(old);
-		old = l->l_mutex;
-		mutex_spin_enter(old);
-	}
-	return old;
+	KASSERT(mutex_owned(l->l_mutex));
+
+	old = l->l_mutex;
+	membar_exit();
+	l->l_mutex = new;
+	mutex_spin_exit(old);
 }

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.138 src/sys/sys/lwp.h:1.139
--- src/sys/sys/lwp.h:1.138	Wed Sep  1 19:37:58 2010
+++ src/sys/sys/lwp.h	Sat Dec 18 01:36:20 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.138 2010/09/01 19:37:58 pooka Exp $	*/
+/*	$NetBSD: lwp.h,v 1.139 2010/12/18 01:36:20 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
@@ -299,7 +299,6 @@
 int	lwp_locked(lwp_t *, kmutex_t *);
 void	lwp_setlock(lwp_t *, kmutex_t *);
 void	lwp_unlock_to(lwp_t *, kmutex_t *);
-kmutex_t *lwp_lock_retry(lwp_t *, kmutex_t *);
 void	lwp_relock(lwp_t *, kmutex_t *);
 int	lwp_trylock(lwp_t *);
 void	lwp_addref(lwp_t *);
@@ -360,16 +359,18 @@
 static inline void
 lwp_lock(lwp_t *l)
 {
-	kmutex_t *old;
-
-	mutex_spin_enter(old = l->l_mutex);
+	kmutex_t *old = l->l_mutex;
 
 	/*
-	 * mutex_enter() will have posted a read barrier. Re-test
-	 * l->l_mutex. If it has changed, we need to try again.
+	 * Note: mutex_spin_enter() will have posted a read barrier.
+	 * Re-test l->l_mutex.  If it has changed, we need to try again.
 	 */
-	if (__predict_false(l->l_mutex != old))
-		lwp_lock_retry(l, old);
+	mutex_spin_enter(old);
+	while (__predict_false(l->l_mutex != old)) {
+		mutex_spin_exit(old);
+		old = l->l_mutex;
+		mutex_spin_enter(old);
+	}
 }
 
 /*

Index: src/sys/sys/sleepq.h
diff -u src/sys/sys/sleepq.h:1.18 src/sys/sys/sleepq.h:1.19
--- src/sys/sys/sleepq.h:1.18	Sun Nov 22 18:40:26 2009
+++ src/sys/sys/sleepq.h	Sat Dec 18 01:36:20 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: sleepq.h,v 1.18 2009/11/22 18:40:26 mbalmer Exp $	*/
+/*	$NetBSD: sleepq.h,v 1.19 2010/12/18 01:36:20 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -120,20 +120,13 @@
 static inline void
 sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
 {
-	kmutex_t *omp;
 
 	/*
-	 * Acquire the per-LWP mutex and lend it ours (the sleep queue
-	 * lock).  Once that's done we're interlocked, and so can release
-	 * the kernel lock.
+	 * Acquire the per-LWP mutex and lend it ours sleep queue lock.
+	 * Once interlocked, we can release the kernel lock.
 	 */
-	omp = l->l_mutex;
-	mutex_spin_enter(omp);
-	if (__predict_false(l->l_mutex != omp)) {
-		omp = lwp_lock_retry(l, omp);
-	}
-	l->l_mutex = mp;
-	mutex_spin_exit(omp);
+	lwp_lock(l);
+	lwp_unlock_to(l, mp);
 	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
 }
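The retry loop now open-coded in lwp_lock(), paired with the membar_exit() in
lwp_unlock_to(), implements what might be called lock pointer chasing: the
current lock holder may redirect l_mutex to a different lock, so an acquirer
must re-test the pointer after taking the lock it originally read.  Below is a
minimal user-space analogue of that idea, a sketch only: struct obj and
obj_lock() are names invented for illustration, and pthread spin locks plus
C11 atomics stand in for kmutex_t and the read barrier implied by
mutex_spin_enter().

#include <pthread.h>
#include <stdatomic.h>

struct obj {
	/* Lock currently covering the object; only its holder may change it. */
	_Atomic(pthread_spinlock_t *) lockp;
};

/* Acquire whichever lock currently covers the object. */
static pthread_spinlock_t *
obj_lock(struct obj *o)
{
	pthread_spinlock_t *old = atomic_load(&o->lockp);

	pthread_spin_lock(old);
	while (old != atomic_load(&o->lockp)) {
		/* The pointer moved while we were spinning: chase it. */
		pthread_spin_unlock(old);
		old = atomic_load(&o->lockp);
		pthread_spin_lock(old);
	}
	return old;	/* caller releases with pthread_spin_unlock() */
}

Once the re-test succeeds, the caller holds the lock the pointer currently
names, and since only the holder may redirect the pointer, no further change
can slip in.  That invariant is why sleepq_enter() can now simply do
lwp_lock() followed by lwp_unlock_to().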