Module Name:	src
Committed By:	rmind
Date:		Fri Apr 16 03:21:49 UTC 2010
Modified Files:
	src/sys/kern: kern_synch.c
	src/sys/sys: sched.h
	src/sys/uvm: uvm_extern.h uvm_glue.c uvm_meter.c

Log Message:
- Merge sched_pstats() and uvm_meter()/uvm_loadav().  This avoids a
  double loop through all LWPs and the duplicate locking overhead.
- Move sched_pstats() from soft-interrupt context to the process 0 main
  loop, which avoids a blocking effect on real-time threads.  Mostly
  fixes PR/38792.

Note: it might be worth moving the loop above PRI_PGDAEMON.  Also,
sched_pstats() might be cleaned up slightly.

To generate a diff of this commit:
cvs rdiff -u -r1.280 -r1.281 src/sys/kern/kern_synch.c
cvs rdiff -u -r1.71 -r1.72 src/sys/sys/sched.h
cvs rdiff -u -r1.162 -r1.163 src/sys/uvm/uvm_extern.h
cvs rdiff -u -r1.144 -r1.145 src/sys/uvm/uvm_glue.c
cvs rdiff -u -r1.51 -r1.52 src/sys/uvm/uvm_meter.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
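For background on the arithmetic being relocated: the load average is an
exponentially decaying average of the number of runnable LWPs, sampled
every 5 seconds.  An m-minute average decays by exp(-5 / (60 * m)) per
sample, which is where the cexp[] constants exp(-1/12), exp(-1/60) and
exp(-1/180) in the diffs below come from.  The following stand-alone
user-space sketch (an illustration only, not code from this commit)
reproduces the recurrence in floating point:

	#include <math.h>
	#include <stdio.h>

	int
	main(void)
	{
		const double minutes[3] = { 1.0, 5.0, 15.0 };
		double load[3] = { 0.0, 0.0, 0.0 };
		const int nrun = 4;	/* pretend 4 LWPs stay runnable */

		/* 30 minutes of 5-second samples. */
		for (int tick = 0; tick < 360; tick++) {
			for (int i = 0; i < 3; i++) {
				const double c =
				    exp(-5.0 / (60.0 * minutes[i]));
				load[i] = c * load[i] + nrun * (1.0 - c);
			}
		}
		printf("1min %.2f  5min %.2f  15min %.2f\n",
		    load[0], load[1], load[2]);
		return 0;
	}

With a constant nrun, each average converges towards nrun; the shorter
the interval, the faster it converges.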
Modified files:

Index: src/sys/kern/kern_synch.c
diff -u src/sys/kern/kern_synch.c:1.280 src/sys/kern/kern_synch.c:1.281
--- src/sys/kern/kern_synch.c:1.280	Wed Mar  3 00:47:31 2010
+++ src/sys/kern/kern_synch.c	Fri Apr 16 03:21:49 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.280 2010/03/03 00:47:31 yamt Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.281 2010/04/16 03:21:49 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.280 2010/03/03 00:47:31 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.281 2010/04/16 03:21:49 rmind Exp $");
 
 #include "opt_kstack.h"
 #include "opt_perfctrs.h"
@@ -128,7 +128,6 @@
 	syncobj_noowner,
 };
 
-callout_t	sched_pstats_ch;
 unsigned	sched_pstats_ticks;
 kcondvar_t	lbolt;			/* once a second sleep address */
 
@@ -152,8 +151,6 @@
 {
 
 	cv_init(&lbolt, "lbolt");
-	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
-	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
 
 	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
 	   "kpreempt", "defer: critical section");
@@ -161,8 +158,6 @@
 	   "kpreempt", "defer: kernel_lock");
 	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
 	   "kpreempt", "immediate");
-
-	sched_pstats(NULL);
 }
 
 /*
@@ -1148,36 +1143,55 @@
 }
 
 /* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
-const fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;
+const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
+
+/*
+ * Constants for averages over 1, 5 and 15 minutes when sampling at
+ * 5 second intervals.
+ */
+static const fixpt_t cexp[ ] = {
+	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
+	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
+	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
+};
 
 /*
  * sched_pstats:
  *
- * Update process statistics and check CPU resource allocation.
- * Call scheduler-specific hook to eventually adjust process/LWP
- * priorities.
+ * => Update process statistics and check CPU resource allocation.
+ * => Call scheduler-specific hook to eventually adjust LWP priorities.
+ * => Compute load average of a quantity on 1, 5 and 15 minute intervals.
  */
 void
-sched_pstats(void *arg)
+sched_pstats(void)
 {
+	extern struct loadavg averunnable;
+	struct loadavg *avg = &averunnable;
 	const int clkhz = (stathz != 0 ? stathz : hz);
-	static bool backwards;
-	struct rlimit *rlim;
-	struct lwp *l;
+	static bool backwards = false;
+	static u_int lavg_count = 0;
 	struct proc *p;
-	long runtm;
-	fixpt_t lpctcpu;
-	u_int lcpticks;
-	int sig;
+	int nrun;
 
 	sched_pstats_ticks++;
-
+	if (++lavg_count >= 5) {
+		lavg_count = 0;
+		nrun = 0;
+	}
 	mutex_enter(proc_lock);
 	PROCLIST_FOREACH(p, &allproc) {
+		struct lwp *l;
+		struct rlimit *rlim;
+		long runtm;
+		int sig;
+
 		/* Increment sleep time (if sleeping), ignore overflow. */
 		mutex_enter(p->p_lock);
 		runtm = p->p_rtime.sec;
 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
+			fixpt_t lpctcpu;
+			u_int lcpticks;
+
 			if (__predict_false((l->l_flag & LW_IDLE) != 0))
 				continue;
 			lwp_lock(l);
@@ -1195,6 +1209,20 @@
 			lpctcpu += ((FSCALE - ccpu) *
 			    (lcpticks * FSCALE / clkhz)) >> FSHIFT;
 			l->l_pctcpu = lpctcpu;
+
+			/* For load average calculation. */
+			if (__predict_false(lavg_count == 0)) {
+				switch (l->l_stat) {
+				case LSSLEEP:
+					if (l->l_slptime > 1) {
+						break;
+					}
+				case LSRUN:
+				case LSONPROC:
+				case LSIDL:
+					nrun++;
+				}
+			}
 		}
 		/* Calculating p_pctcpu only for ps(1) */
 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
@@ -1227,7 +1255,16 @@
 		}
 	}
 	mutex_exit(proc_lock);
-	uvm_meter();
+
+	/* Load average calculation. */
+	if (__predict_false(lavg_count == 0)) {
+		int i;
+		for (i = 0; i < __arraycount(cexp); i++) {
+			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
+			    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
+		}
+	}
+
+	/* Lightning bolt. */
 	cv_broadcast(&lbolt);
-	callout_schedule(&sched_pstats_ch, hz);
 }

Index: src/sys/sys/sched.h
diff -u src/sys/sys/sched.h:1.71 src/sys/sys/sched.h:1.72
--- src/sys/sys/sched.h:1.71	Sat Oct  3 22:32:56 2009
+++ src/sys/sys/sched.h	Fri Apr 16 03:21:49 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: sched.h,v 1.71 2009/10/03 22:32:56 elad Exp $	*/
+/*	$NetBSD: sched.h,v 1.72 2010/04/16 03:21:49 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
@@ -237,7 +237,7 @@
 void		sched_tick(struct cpu_info *);
 void		schedclock(struct lwp *);
 void		sched_schedclock(struct lwp *);
-void		sched_pstats(void *);
+void		sched_pstats(void);
 void		sched_lwp_stats(struct lwp *);
 void		sched_pstats_hook(struct lwp *, int);
 

Index: src/sys/uvm/uvm_extern.h
diff -u src/sys/uvm/uvm_extern.h:1.162 src/sys/uvm/uvm_extern.h:1.163
--- src/sys/uvm/uvm_extern.h:1.162	Mon Feb  8 19:02:33 2010
+++ src/sys/uvm/uvm_extern.h	Fri Apr 16 03:21:49 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_extern.h,v 1.162 2010/02/08 19:02:33 joerg Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.163 2010/04/16 03:21:49 rmind Exp $	*/
 
 /*
  *
@@ -681,7 +681,6 @@
 void			uvm_whatis(uintptr_t, void (*)(const char *, ...));
 
 /* uvm_meter.c */
-void			uvm_meter(void);
 int			uvm_sysctl(int *, u_int, void *, size_t *, void *,
 			    size_t, struct proc *);
 int			uvm_pctparam_check(struct uvm_pctparam *, int);

Index: src/sys/uvm/uvm_glue.c
diff -u src/sys/uvm/uvm_glue.c:1.144 src/sys/uvm/uvm_glue.c:1.145
--- src/sys/uvm/uvm_glue.c:1.144	Thu Feb 25 23:10:49 2010
+++ src/sys/uvm/uvm_glue.c	Fri Apr 16 03:21:49 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $	*/
+/*	$NetBSD: uvm_glue.c,v 1.145 2010/04/16 03:21:49 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.145 2010/04/16 03:21:49 rmind Exp $");
 
 #include "opt_kgdb.h"
 #include "opt_kstack.h"
@@ -78,6 +78,8 @@
  */
 
 #include <sys/param.h>
+#include <sys/kernel.h>
+
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
@@ -414,6 +416,9 @@
 /*
  * uvm_scheduler: process zero main loop.
  */
+
+extern struct loadavg averunnable;
+
 void
 uvm_scheduler(void)
 {
@@ -425,7 +430,7 @@
 	lwp_unlock(l);
 
 	for (;;) {
-		/* XXX/TODO: move some workload to this LWP? */
-		(void)kpause("uvm", false, 0, NULL);
+		sched_pstats();
+		(void)kpause("uvm", false, hz, NULL);
 	}
 }

Index: src/sys/uvm/uvm_meter.c
diff -u src/sys/uvm/uvm_meter.c:1.51 src/sys/uvm/uvm_meter.c:1.52
--- src/sys/uvm/uvm_meter.c:1.51	Sun Apr 11 01:53:03 2010
+++ src/sys/uvm/uvm_meter.c	Fri Apr 16 03:21:49 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_meter.c,v 1.51 2010/04/11 01:53:03 mrg Exp $	*/
+/*	$NetBSD: uvm_meter.c,v 1.52 2010/04/16 03:21:49 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.51 2010/04/11 01:53:03 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.52 2010/04/16 03:21:49 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -59,73 +59,9 @@
 int maxslp = MAXSLP;	/* patchable ... */
 struct loadavg averunnable;
 
-/*
- * constants for averages over 1, 5, and 15 minutes when sampling at
- * 5 second intervals.
- */
-
-static const fixpt_t cexp[3] = {
-	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
-	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
-	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
-};
-
-/*
- * prototypes
- */
-
-static void uvm_loadav(struct loadavg *);
 static void uvm_total(struct vmtotal *);
 
 /*
- * uvm_meter: calculate load average.
- */
-void
-uvm_meter(void)
-{
-	static int count;
-
-	if (++count >= 5) {
-		count = 0;
-		uvm_loadav(&averunnable);
-	}
-}
-
-/*
- * uvm_loadav: compute a tenex style load average of a quantity on
- * 1, 5, and 15 minute intervals.
- */
-static void
-uvm_loadav(struct loadavg *avg)
-{
-	int i, nrun;
-	struct lwp *l;
-
-	nrun = 0;
-
-	mutex_enter(proc_lock);
-	LIST_FOREACH(l, &alllwp, l_list) {
-		if ((l->l_flag & (LW_SINTR | LW_SYSTEM)) != 0)
-			continue;
-		switch (l->l_stat) {
-		case LSSLEEP:
-			if (l->l_slptime > 1)
-				continue;
-			/* fall through */
-		case LSRUN:
-		case LSONPROC:
-		case LSIDL:
-			nrun++;
-		}
-	}
-	mutex_exit(proc_lock);
-
-	for (i = 0; i < 3; i++)
-		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
-		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
-}
-
-/*
  * sysctl helper routine for the vm.vmmeter node.
  */
 static int
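The kernel performs the same computation in fixed point.  Below is a
hypothetical user-space rendering of one minute of updates, assuming
FSHIFT is 11 and fixpt_t is a 32-bit unsigned integer, as in
<sys/param.h> and <sys/types.h>; it is a sketch, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	#define FSHIFT	11		/* assumed, as in <sys/param.h> */
	#define FSCALE	(1 << FSHIFT)

	typedef uint32_t fixpt_t;

	/* exp(-1/12) scaled to fixed point, as in cexp[0] above. */
	static const fixpt_t cexp1 = 0.9200444146293232 * FSCALE;

	int
	main(void)
	{
		fixpt_t ldavg = 0;
		const unsigned nrun = 2;	/* pretend 2 runnable LWPs */

		/* One minute of 5-second samples (12 updates). */
		for (int i = 0; i < 12; i++) {
			ldavg = (cexp1 * ldavg +
			    nrun * FSCALE * (FSCALE - cexp1)) >> FSHIFT;
		}

		/* Scale back to a decimal, as uptime(1) displays it. */
		printf("1-minute load: %u.%02u\n",
		    (unsigned)(ldavg / FSCALE),
		    (unsigned)(ldavg % FSCALE * 100 / FSCALE));
		return 0;
	}

Because both the decay constant and the sample are scaled by FSCALE,
each product carries a factor of FSCALE squared, so the sum is shifted
right by FSHIFT after every step to keep ldavg in units of FSCALE.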