Module Name:	src
Committed By:	rmind
Date:		Sun Aug 7 21:13:06 UTC 2011
Modified Files:
	src/sys/kern: kern_cpu.c kern_lwp.c kern_runq.c subr_kcpuset.c
	    sys_pset.c sys_sched.c
	src/sys/sys: lwp.h

Log Message:
Remove LW_AFFINITY flag and fix some bugs in affinity mask handling.

To generate a diff of this commit:
cvs rdiff -u -r1.48 -r1.49 src/sys/kern/kern_cpu.c
cvs rdiff -u -r1.161 -r1.162 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.31 -r1.32 src/sys/kern/kern_runq.c
cvs rdiff -u -r1.1 -r1.2 src/sys/kern/subr_kcpuset.c
cvs rdiff -u -r1.16 -r1.17 src/sys/kern/sys_pset.c
cvs rdiff -u -r1.36 -r1.37 src/sys/kern/sys_sched.c
cvs rdiff -u -r1.154 -r1.155 src/sys/sys/lwp.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
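In short, the change drops the LW_AFFINITY bit from l_flag and makes a
non-NULL l->l_affinity pointer the only indication that an affinity mask
is set, so the flag and the pointer can no longer fall out of sync.  As a
rough, self-contained sketch of the new check idiom (a toy structure and
function, not the kernel's struct lwp or any real kernel API):

	/* Toy model only -- not the kernel's struct lwp. */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_lwp {
		int	 l_flag;	/* flag word; no LW_AFFINITY bit anymore */
		void	*l_affinity;	/* CPU set reference, or NULL if unset */
	};

	static bool
	has_affinity(const struct toy_lwp *l)
	{
		/* New idiom: the pointer itself is the single source of truth. */
		return l->l_affinity != NULL;
	}

	int
	main(void)
	{
		struct toy_lwp l = { .l_flag = 0, .l_affinity = NULL };

		printf("affinity set: %d\n", has_affinity(&l));
		return 0;
	}

The kern_cpu.c, kern_runq.c and sys_pset.c hunks below are mostly this
substitution; sys_sched.c also reworks how an existing mask is released,
and kern_lwp.c moves the final release into lwp_free().
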
Modified files:

Index: src/sys/kern/kern_cpu.c
diff -u src/sys/kern/kern_cpu.c:1.48 src/sys/kern/kern_cpu.c:1.49
--- src/sys/kern/kern_cpu.c:1.48	Sun Aug 7 13:33:01 2011
+++ src/sys/kern/kern_cpu.c	Sun Aug 7 21:13:05 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_cpu.c,v 1.48 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: kern_cpu.c,v 1.49 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.48 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.49 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -311,13 +311,12 @@
 			lwp_unlock(l);
 			continue;
 		}
-		/* Normal case - no affinity */
-		if ((l->l_flag & LW_AFFINITY) == 0) {
+		/* Regular case - no affinity. */
+		if (l->l_affinity == NULL) {
 			lwp_migrate(l, target_ci);
 			continue;
 		}
-		/* Affinity is set, find an online CPU in the set */
-		KASSERT(l->l_affinity != NULL);
+		/* Affinity is set, find an online CPU in the set. */
 		for (CPU_INFO_FOREACH(cii, mci)) {
 			mspc = &mci->ci_schedstate;
 			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.161 src/sys/kern/kern_lwp.c:1.162
--- src/sys/kern/kern_lwp.c:1.161	Sat Jul 30 17:01:04 2011
+++ src/sys/kern/kern_lwp.c	Sun Aug 7 21:13:05 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.161 2011/07/30 17:01:04 christos Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.162 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -211,7 +211,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.161 2011/07/30 17:01:04 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.162 2011/08/07 21:13:05 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -803,18 +803,19 @@
 	p2->p_nlwps++;
 	p2->p_nrlwps++;
 
+	KASSERT(l2->l_affinity == NULL);
+
 	if ((p2->p_flag & PK_SYSTEM) == 0) {
-		/* Inherit an affinity */
-		if (l1->l_flag & LW_AFFINITY) {
+		/* Inherit the affinity mask. */
+		if (l1->l_affinity) {
 			/*
 			 * Note that we hold the state lock while inheriting
 			 * the affinity to avoid race with sched_setaffinity().
 			 */
 			lwp_lock(l1);
-			if (l1->l_flag & LW_AFFINITY) {
+			if (l1->l_affinity) {
 				kcpuset_use(l1->l_affinity);
 				l2->l_affinity = l1->l_affinity;
-				l2->l_flag |= LW_AFFINITY;
 			}
 			lwp_unlock(l1);
 		}
@@ -987,12 +988,8 @@
 
 	lwp_lock(l);
 	l->l_stat = LSZOMB;
-	if (l->l_name != NULL)
+	if (l->l_name != NULL) {
 		strcpy(l->l_name, "(zombie)");
-	if (l->l_flag & LW_AFFINITY) {
-		l->l_flag &= ~LW_AFFINITY;
-	} else {
-		KASSERT(l->l_affinity == NULL);
 	}
 	lwp_unlock(l);
 	p->p_nrlwps--;
@@ -1001,12 +998,6 @@
 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
 	mutex_exit(p->p_lock);
 
-	/* Safe without lock since LWP is in zombie state */
-	if (l->l_affinity) {
-		kcpuset_unuse(l->l_affinity, NULL);
-		l->l_affinity = NULL;
-	}
-
 	/*
 	 * We can no longer block. At this point, lwp_free() may already
 	 * be gunning for us. On a multi-CPU system, we may be off p_lwps.
@@ -1103,6 +1094,17 @@
 	cv_destroy(&l->l_sigcv);
 
 	/*
+	 * Free lwpctl structure and affinity.
+	 */
+	if (l->l_lwpctl) {
+		lwp_ctl_free(l);
+	}
+	if (l->l_affinity) {
+		kcpuset_unuse(l->l_affinity, NULL);
+		l->l_affinity = NULL;
+	}
+
+	/*
 	 * Free the LWP's turnstile and the LWP structure itself unless the
 	 * caller wants to recycle them. Also, free the scheduler specific
 	 * data.
@@ -1112,8 +1114,6 @@
 	 *
 	 * We don't recycle the VM resources at this time.
 	 */
 
-	if (l->l_lwpctl != NULL)
-		lwp_ctl_free(l);
 	if (!recycle && l->l_ts != &turnstile0)
 		pool_cache_put(turnstile_cache, l->l_ts);

Index: src/sys/kern/kern_runq.c
diff -u src/sys/kern/kern_runq.c:1.31 src/sys/kern/kern_runq.c:1.32
--- src/sys/kern/kern_runq.c:1.31	Sun Aug 7 13:33:01 2011
+++ src/sys/kern/kern_runq.c	Sun Aug 7 21:13:05 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_runq.c,v 1.31 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: kern_runq.c,v 1.32 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.31 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.32 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -346,15 +346,15 @@
 	const struct schedstate_percpu *spc = &ci->ci_schedstate;
 	KASSERT(lwp_locked(__UNCONST(l), NULL));
 
-	/* CPU is offline */
+	/* Is CPU offline? */
 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
 		return false;
 
-	/* Affinity bind */
-	if (__predict_false(l->l_flag & LW_AFFINITY))
+	/* Is affinity set? */
+	if (__predict_false(l->l_affinity))
 		return kcpuset_isset(l->l_affinity, cpu_index(ci));
 
-	/* Processor-set */
+	/* Is there a processor-set? */
 	return (spc->spc_psid == l->l_psid);
 }
 

Index: src/sys/kern/subr_kcpuset.c
diff -u src/sys/kern/subr_kcpuset.c:1.1 src/sys/kern/subr_kcpuset.c:1.2
--- src/sys/kern/subr_kcpuset.c:1.1	Sun Aug 7 13:33:01 2011
+++ src/sys/kern/subr_kcpuset.c	Sun Aug 7 21:13:05 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2011 The NetBSD Foundation, Inc.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -216,17 +216,16 @@
 void
 kcpuset_destroy(kcpuset_t *kcp)
 {
-	kcpuset_impl_t *kc, *nkc;
+	kcpuset_impl_t *kc;
 
 	KASSERT(kc_initialised);
 	KASSERT(kcp != NULL);
 
-	kc = KC_GETSTRUCT(kcp);
 	do {
-		nkc = KC_GETSTRUCT(kc->kc_next);
+		kc = KC_GETSTRUCT(kcp);
+		kcp = kc->kc_next;
 		pool_cache_put(kc_cache, kc);
-		kc = nkc;
-	} while (kc);
+	} while (kcp);
 }
 
 /*

Index: src/sys/kern/sys_pset.c
diff -u src/sys/kern/sys_pset.c:1.16 src/sys/kern/sys_pset.c:1.17
--- src/sys/kern/sys_pset.c:1.16	Sun Aug 7 13:33:01 2011
+++ src/sys/kern/sys_pset.c	Sun Aug 7 21:13:05 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_pset.c,v 1.16 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: sys_pset.c,v 1.17 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*
  * Copyright (c) 2008, Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_pset.c,v 1.16 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_pset.c,v 1.17 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 
@@ -366,10 +366,11 @@
 	 * with this target CPU in it.
 	 */
 	LIST_FOREACH(t, &alllwp, l_list) {
-		if ((t->l_flag & LW_AFFINITY) == 0)
+		if (t->l_affinity == NULL) {
 			continue;
+		}
 		lwp_lock(t);
-		if ((t->l_flag & LW_AFFINITY) == 0) {
+		if (t->l_affinity == NULL) {
 			lwp_unlock(t);
 			continue;
 		}

Index: src/sys/kern/sys_sched.c
diff -u src/sys/kern/sys_sched.c:1.36 src/sys/kern/sys_sched.c:1.37
--- src/sys/kern/sys_sched.c:1.36	Sun Aug 7 13:33:01 2011
+++ src/sys/kern/sys_sched.c	Sun Aug 7 21:13:05 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_sched.c,v 1.36 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: sys_sched.c,v 1.37 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*
  * Copyright (c) 2008, 2011 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.36 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.37 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 
@@ -425,32 +425,33 @@
 	}
 #endif
 
-	/* Find the LWP(s) */
+	/* Iterate through LWP(s). */
 	lcnt = 0;
 	lid = SCARG(uap, lid);
 	LIST_FOREACH(t, &p->p_lwps, l_sibling) {
-		if (lid && lid != t->l_lid)
+		if (lid && lid != t->l_lid) {
 			continue;
+		}
 		lwp_lock(t);
-		/* It is not allowed to set the affinity for zombie LWPs */
+		/* No affinity for zombie LWPs. */
		if (t->l_stat == LSZOMB) {
 			lwp_unlock(t);
 			continue;
 		}
+		/* First, release existing affinity, if any. */
+		if (t->l_affinity) {
+			kcpuset_unuse(t->l_affinity, &kcpulst);
+		}
 		if (kcset) {
-			/* Set the affinity flag and new CPU set */
-			t->l_flag |= LW_AFFINITY;
+			/*
+			 * Hold a reference on affinity mask, assign mask to
+			 * LWP and migrate it to another CPU (unlocks LWP).
+			 */
 			kcpuset_use(kcset);
-			if (t->l_affinity != NULL)
-				kcpuset_unuse(t->l_affinity, &kcpulst);
 			t->l_affinity = kcset;
-			/* Migrate to another CPU, unlocks LWP */
 			lwp_migrate(t, ci);
 		} else {
-			/* Unset the affinity flag */
-			t->l_flag &= ~LW_AFFINITY;
-			if (t->l_affinity != NULL)
-				kcpuset_unuse(t->l_affinity, &kcpulst);
+			/* Old affinity mask is released, just clear. */
 			t->l_affinity = NULL;
 			lwp_unlock(t);
 		}
@@ -511,8 +512,7 @@
 		goto out;
 	}
 	lwp_lock(t);
-	if (t->l_flag & LW_AFFINITY) {
-		KASSERT(t->l_affinity != NULL);
+	if (t->l_affinity) {
 		kcpuset_copy(kcset, t->l_affinity);
 	} else {
 		kcpuset_zero(kcset);

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.154 src/sys/sys/lwp.h:1.155
--- src/sys/sys/lwp.h:1.154	Sun Aug 7 14:03:16 2011
+++ src/sys/sys/lwp.h	Sun Aug 7 21:13:06 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.154 2011/08/07 14:03:16 rmind Exp $	*/
+/*	$NetBSD: lwp.h,v 1.155 2011/08/07 21:13:06 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
@@ -227,7 +227,6 @@
 #define	LW_BATCH	0x00040000 /* LWP tends to hog CPU */
 #define	LW_WCORE	0x00080000 /* Stop for core dump on return to user */
 #define	LW_WEXIT	0x00100000 /* Exit before return to user */
-#define	LW_AFFINITY	0x00200000 /* Affinity is assigned to the thread */
 #define	LW_SA_UPCALL	0x00400000 /* SA upcall is pending */
 #define	LW_SA_BLOCKING	0x00800000 /* Blocking in tsleep() */
 #define	LW_PENDSIG	0x01000000 /* Pending signal for us */
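
As background for the kern_lwp.c and sys_sched.c lifecycle changes above:
the affinity mask is shared by reference, taken with kcpuset_use() when a
new LWP inherits it in lwp_create() and dropped with kcpuset_unuse() in
lwp_free() or when sched_setaffinity() replaces or clears it.  The sketch
below is only a rough model of that use/unuse pattern -- toy names, a
plain counter, no locking, and without the second "deferred destruction
list" argument the real kcpuset_unuse() takes:

	/* Rough model of reference-counted mask sharing; illustrative only. */
	#include <stdio.h>
	#include <stdlib.h>

	struct toy_cpuset {
		unsigned refcnt;	/* number of LWPs referencing this mask */
	};

	static struct toy_cpuset *
	toy_cpuset_create(void)
	{
		struct toy_cpuset *kc = malloc(sizeof(*kc));

		if (kc == NULL)
			abort();	/* keep the sketch simple */
		kc->refcnt = 1;		/* creator holds the first reference */
		return kc;
	}

	static void
	toy_cpuset_use(struct toy_cpuset *kc)		/* cf. kcpuset_use() */
	{
		kc->refcnt++;
	}

	static void
	toy_cpuset_unuse(struct toy_cpuset *kc)		/* cf. kcpuset_unuse() */
	{
		if (--kc->refcnt == 0)
			free(kc);	/* last reference frees the mask */
	}

	int
	main(void)
	{
		/* Parent LWP sets an affinity mask ... */
		struct toy_cpuset *mask = toy_cpuset_create();

		/* ... child inherits it on lwp_create(): share, do not copy. */
		toy_cpuset_use(mask);

		/*
		 * Each LWP drops its reference in lwp_free() or when its
		 * affinity is cleared; the mask disappears with the last one.
		 */
		toy_cpuset_unuse(mask);
		toy_cpuset_unuse(mask);
		printf("mask released\n");
		return 0;
	}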