Re: Scheduler improvements, take 1001, Patch 1/5

2012-10-09 Thread Gregor Best
diff --git a/kern/sched_bsd.c b/kern/sched_bsd.c
index 172bb8f..c7121dc 100644
--- a/kern/sched_bsd.c
+++ b/kern/sched_bsd.c
@@ -77,12 +77,12 @@ scheduler_start(void)
 
 	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
 
-	rrticks_init = hz / 10;
+	rrticks_init = hz / 20;
 	schedcpu(&schedcpu_to);
 }
 
 /*
- * Force switch among equal priority processes every 100ms.
+ * Force switch among equal priority processes every 50ms.
  */
 void
 roundrobin(struct cpu_info *ci)
-- 
1.7.6
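
[A note on the numbers: rrticks_init counts hardclock ticks per round-robin
quantum, so with the customary hz = 100 the change drops the quantum from
hz / 10 = 10 ticks (100ms) to hz / 20 = 5 ticks (50ms). A minimal userland
sketch of that arithmetic; the hz values here are assumed for illustration:]

#include <stdio.h>

int
main(void)
{
	/* hz = 100 is the traditional default; 1000 is illustrative. */
	const int hzs[] = { 100, 1000 };
	size_t i;

	for (i = 0; i < sizeof(hzs) / sizeof(hzs[0]); i++) {
		int hz = hzs[i];
		int old_ticks = hz / 10;	/* old quantum, in ticks */
		int new_ticks = hz / 20;	/* new quantum, in ticks */

		printf("hz=%4d: %2d ticks (%3dms) -> %d ticks (%dms)\n",
		    hz, old_ticks, 1000 * old_ticks / hz,
		    new_ticks, 1000 * new_ticks / hz);
	}
	return 0;
}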



Re: Scheduler improvements, take 1001, Patch 1/5

2012-10-09 Thread Gregor Best
diff --git a/kern/kern_clock.c b/kern/kern_clock.c
index 843965b..f598afc 100644
--- a/kern/kern_clock.c
+++ b/kern/kern_clock.c
@@ -233,7 +233,7 @@ hardclock(struct clockframe *frame)
 	if (stathz == 0)
 		statclock(frame);
 
-	if (--ci->ci_schedstate.spc_rrticks <= 0)
+	if (p && (--(p->p_rrticks) <= 0))
 		roundrobin(ci);
 
/*
diff --git a/kern/kern_proc.c b/kern/kern_proc.c
index ad861c8..e0d5536 100644
--- a/kern/kern_proc.c
+++ b/kern/kern_proc.c
@@ -398,8 +398,6 @@ proc_printit(struct proc *p, const char *modif, int (*pr)(const char *, ...))
 	    p->p_comm, p->p_pid, pst, p->p_flag, P_BITS);
 	(*pr)("pri=%u, usrpri=%u, nice=%d\n",
 	    p->p_priority, p->p_usrpri, p->p_p->ps_nice);
-	(*pr)("forw=%p, list=%p,%p\n",
-	    TAILQ_NEXT(p, p_runq), p->p_list.le_next, p->p_list.le_prev);
 	(*pr)("process=%p user=%p, vmspace=%p\n",
 	    p->p_p, p->p_addr, p->p_vmspace);
 	(*pr)("estcpu=%u, cpticks=%d, pctcpu=%u.%u, swtime=%u\n",
diff --git a/kern/kern_sched.c b/kern/kern_sched.c
index 253226a..79eb28c 100644
--- a/kern/kern_sched.c
+++ b/kern/kern_sched.c
@@ -24,11 +24,22 @@
 #include <sys/resourcevar.h>
 #include <sys/signalvar.h>
 #include <sys/mutex.h>
+#include <sys/tree.h>
 
 #include <uvm/uvm_extern.h>
 
 #include <sys/malloc.h>
 
+static int
+sched_cmp_proc(struct proc *a, struct proc *b) {
+	if (a == b)
+		return 0;
+	if (timercmp(&(a->p_deadline), &(b->p_deadline), <))
+		return -1;
+	return 1;
+}
+
+RB_GENERATE_STATIC(prochead, proc, p_runq, sched_cmp_proc);
 
 void sched_kthreads_create(void *);
 
@@ -79,10 +90,8 @@ void
 sched_init_cpu(struct cpu_info *ci)
 {
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
-	int i;
 
-	for (i = 0; i < SCHED_NQS; i++)
-		TAILQ_INIT(&spc->spc_qs[i]);
+	RB_INIT(&spc->spc_runq);
 
 	spc->spc_idleproc = NULL;
 
@@ -158,18 +167,19 @@ sched_idle(void *v)
 
 		cpuset_add(&sched_idle_cpus, ci);
 		cpu_idle_enter();
-		while (spc->spc_whichqs == 0) {
+
+		while (curcpu_is_idle()) {
 			if (spc->spc_schedflags & SPCF_SHOULDHALT &&
-			    (spc->spc_schedflags & SPCF_HALTED) == 0) {
+			     (spc->spc_schedflags & SPCF_HALTED) == 0) {
 				cpuset_del(&sched_idle_cpus, ci);
 				SCHED_LOCK(s);
-				atomic_setbits_int(&spc->spc_schedflags,
-				    spc->spc_whichqs ? 0 : SPCF_HALTED);
+				atomic_setbits_int(&spc->spc_schedflags, SPCF_HALTED);
 				SCHED_UNLOCK(s);
 				wakeup(spc);
 			}
 			cpu_idle_cycle();
 		}
+
 		cpu_idle_leave();
 		cpuset_del(&sched_idle_cpus, ci);
 	}
@@ -222,14 +232,13 @@ void
 setrunqueue(struct proc *p)
 {
 	struct schedstate_percpu *spc;
-	int queue = p->p_priority >> 2;
 
 	SCHED_ASSERT_LOCKED();
 	spc = &p->p_cpu->ci_schedstate;
 	spc->spc_nrun++;
 
-	TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
-	spc->spc_whichqs |= (1 << queue);
+	KASSERT(!RB_FIND(prochead, &spc->spc_runq, p));
+	RB_INSERT(prochead, &spc->spc_runq, p);
 	cpuset_add(&sched_queued_cpus, p->p_cpu);
 
 	if (cpuset_isset(&sched_idle_cpus, p->p_cpu))
@@ -240,38 +249,29 @@ void
 remrunqueue(struct proc *p)
 {
 	struct schedstate_percpu *spc;
-	int queue = p->p_priority >> 2;
 
 	SCHED_ASSERT_LOCKED();
 	spc = &p->p_cpu->ci_schedstate;
 	spc->spc_nrun--;
 
-	TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
-	if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
-		spc->spc_whichqs &= ~(1 << queue);
-		if (spc->spc_whichqs == 0)
-			cpuset_del(&sched_queued_cpus, p->p_cpu);
-	}
+	KASSERT(RB_REMOVE(prochead, &spc->spc_runq, p));
+	if (RB_EMPTY(&spc->spc_runq))
+		cpuset_del(&sched_queued_cpus, p->p_cpu);
 }
 
 struct proc *
 sched_chooseproc(void)
 {
 	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
-	struct proc *p;
-	int queue;
+	struct proc *p, *p_tmp = NULL;
 
 	SCHED_ASSERT_LOCKED();
 
 	if (spc->spc_schedflags & SPCF_SHOULDHALT) {
-		if (spc->spc_whichqs) {
-			for (queue = 0; queue < SCHED_NQS; queue++) {
-				TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
-					remrunqueue(p);
-					p->p_cpu = sched_choosecpu(p);
-					setrunqueue(p);
-				}
-			}
+		RB_FOREACH_SAFE(p, prochead, &spc->spc_runq, p_tmp) {
+			remrunqueue(p);
+
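
[The core of this patch is replacing the 32-bucket priority array
(spc_qs / spc_whichqs) with a single red-black tree ordered by each
process's p_deadline, so sched_chooseproc can pick the earliest deadline
with RB_MIN. Below is a minimal userland sketch of that structure,
assuming OpenBSD's <sys/tree.h>; the "task" struct, its fields, and the
deadlines are invented for illustration, standing in for struct proc,
p_runq, and p_deadline:]

#include <sys/tree.h>
#include <sys/time.h>
#include <stdio.h>

struct task {
	struct timeval	 t_deadline;	/* stand-in for p_deadline */
	const char	*t_name;
	RB_ENTRY(task)	 t_entry;	/* stand-in for p_runq */
};

/*
 * Mirror of sched_cmp_proc above: order strictly by deadline and
 * return 0 only for pointer equality, so two distinct processes
 * with equal deadlines can still coexist in the tree.
 */
static int
task_cmp(struct task *a, struct task *b)
{
	if (a == b)
		return 0;
	if (timercmp(&a->t_deadline, &b->t_deadline, <))
		return -1;
	return 1;
}

RB_HEAD(taskq, task);
RB_GENERATE_STATIC(taskq, task, t_entry, task_cmp);

int
main(void)
{
	struct taskq q = RB_INITIALIZER(&q);
	struct task late, soon;
	struct task *t;

	late.t_deadline.tv_sec = 2;
	late.t_deadline.tv_usec = 0;
	late.t_name = "late";
	soon.t_deadline.tv_sec = 1;
	soon.t_deadline.tv_usec = 500000;
	soon.t_name = "soon";

	RB_INSERT(taskq, &q, &late);
	RB_INSERT(taskq, &q, &soon);

	/* RB_MIN yields the earliest deadline, i.e. the next one to run. */
	for (t = RB_MIN(taskq, &q); t != NULL; t = RB_NEXT(taskq, &q, t))
		printf("%s\n", t->t_name);	/* prints "soon", then "late" */
	return 0;
}

[Because the comparator returns 0 only when both arguments are the same
pointer, RB_FIND can only ever locate the exact element passed to it,
which is precisely what the KASSERT in setrunqueue relies on to detect
double insertion.]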