Module Name: src Committed By: yamt Date: Tue Mar 12 23:16:31 UTC 2013
Modified Files: src/sys/kern: kern_runq.c Log Message: Revert rev. 1.37 for now. PR/47634 from Ryo ONODERA. While I have no idea how this change can break bge, I don't have the hardware and/or time to investigate right now. To generate a diff of this commit: cvs rdiff -u -r1.37 -r1.38 src/sys/kern/kern_runq.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/kern/kern_runq.c diff -u src/sys/kern/kern_runq.c:1.37 src/sys/kern/kern_runq.c:1.38 --- src/sys/kern/kern_runq.c:1.37 Wed Mar 6 11:25:01 2013 +++ src/sys/kern/kern_runq.c Tue Mar 12 23:16:31 2013 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_runq.c,v 1.37 2013/03/06 11:25:01 yamt Exp $ */ +/* $NetBSD: kern_runq.c,v 1.38 2013/03/12 23:16:31 yamt Exp $ */ /* * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org> @@ -27,7 +27,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.37 2013/03/06 11:25:01 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.38 2013/03/12 23:16:31 yamt Exp $"); #include <sys/param.h> #include <sys/kernel.h> @@ -90,8 +90,6 @@ typedef struct { struct evcnt r_ev_localize; } runqueue_t; -#define AVGCOUNT_SHIFT 4 /* shift for r_avgcount */ - static void * sched_getrq(runqueue_t *, const pri_t); #ifdef MULTIPROCESSOR static lwp_t * sched_catchlwp(struct cpu_info *); @@ -385,7 +383,7 @@ sched_takecpu(struct lwp *l) /* Make sure that thread is in appropriate processor-set */ if (__predict_true(spc->spc_psid == l->l_psid)) { /* If CPU of this thread is idling - run there */ - if (ci_rq->r_avgcount <= (1 << AVGCOUNT_SHIFT)) { + if (ci_rq->r_count == 0) { ci_rq->r_ev_stay.ev_count++; return ci; } @@ -524,20 +522,10 @@ sched_balance(void *nocallout) /* Make lockless countings */ for (CPU_INFO_FOREACH(cii, ci)) { - const bool notidle = ci->ci_data.cpu_idlelwp != ci->ci_curlwp; - u_int nrunning; - ci_rq = ci->ci_schedstate.spc_sched_info; /* Average count of the threads */ - nrunning = ci_rq->r_count + notidle; - ci_rq->r_avgcount = (ci_rq->r_avgcount + - (nrunning << AVGCOUNT_SHIFT)) >> 1; - - /* We are not interested in a CPU without migratable threads */ - if (ci_rq->r_mcount == 0) { - continue; - } + ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1; /* Look for CPU with the highest average */ if (ci_rq->r_avgcount > highest) {