The refactoring below reduces the number of places where
scheduling-related variables are written, which should help future
locking work, and makes various "features" of the existing logic
obvious:

- `p_priority' is now modified inside setrunqueue(), making it obvious
  that it is a placeholder for calculating the queue.

  Similarly, it becomes obvious that setrunnable() uses it as the
  "boosted" sleep priority.

  Note that the existing bug, where the priority used by resched_proc()
  and the one used by setrunqueue() differ when `p_usrpri' <
  `p_priority', is now visible.  This might result in calling
  need_resched() without finding a thread in a higher runqueue (think
  of sndiod(8), for example).

- `p_stat' is now modified inside setrunqueue(), meaning that `SRUN' is
  now always set in kern/kern_sched.c.

- When NULL is passed as the first argument to setrunqueue(), it means
  "pick any CPU" (both call forms are sketched right after this list).
  Interestingly, the only place this is used is after sleeping.  On top
  of stealing processes, the scheduler doesn't try hard to make threads
  stick to the same CPU.  Something to
  investigate...
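
For reference, here is a minimal sketch of the two call forms
introduced by this diff (both appear verbatim in the hunks below; `s'
is the usual int taken by SCHED_LOCK()):

	/* Queue the thread on a specific CPU, as in yield(): */
	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	SCHED_UNLOCK(s);

	/* Or let sched_choosecpu() pick a CPU, keeping the (possibly
	 * boosted) sleep priority, as in setrunnable(): */
	SCHED_LOCK(s);
	setrunqueue(NULL, p, p->p_priority);
	SCHED_UNLOCK(s);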

This refactoring is part of my previous diff to shrink SCHED_LOCK() and
should not introduce any behavior change.  That said, I'd like to
commit it after release.

Comments?  Oks?

Index: arch/m88k/m88k/m88k_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/m88k/m88k/m88k_machdep.c,v
retrieving revision 1.69
diff -u -p -r1.69 m88k_machdep.c
--- arch/m88k/m88k/m88k_machdep.c       22 Oct 2018 17:31:24 -0000      1.69
+++ arch/m88k/m88k/m88k_machdep.c       5 Oct 2019 10:01:24 -0000
@@ -564,9 +564,7 @@ cpu_emergency_disable()
                 * to mi_switch().
                 */
                SCHED_LOCK(s);
-               p->p_priority = p->p_usrpri;
-               p->p_stat = SRUN;
-               setrunqueue(p);
+               setrunqueue(p->p_cpu, p, p->p_usrpri);
                p->p_ru.ru_nvcsw++;
                SCHED_UNLOCK(s);
        }
Index: kern/kern_fork.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_fork.c,v
retrieving revision 1.213
diff -u -p -r1.213 kern_fork.c
--- kern/kern_fork.c    21 Jun 2019 09:39:48 -0000      1.213
+++ kern/kern_fork.c    5 Oct 2019 10:02:33 -0000
@@ -326,12 +326,12 @@ fork_check_maxthread(uid_t uid)
 static inline void
 fork_thread_start(struct proc *p, struct proc *parent, int flags)
 {
+       struct cpu_info *ci;
        int s;
 
        SCHED_LOCK(s);
-       p->p_stat = SRUN;
-       p->p_cpu = sched_choosecpu_fork(parent, flags);
-       setrunqueue(p);
+       ci = sched_choosecpu_fork(parent, flags);
+       setrunqueue(ci, p, p->p_priority);
        SCHED_UNLOCK(s);
 }
 
Index: kern/kern_sched.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sched.c,v
retrieving revision 1.58
diff -u -p -r1.58 kern_sched.c
--- kern/kern_sched.c   1 Jun 2019 14:11:17 -0000       1.58
+++ kern/kern_sched.c   5 Oct 2019 12:58:25 -0000
@@ -244,12 +244,21 @@ sched_init_runqueues(void)
 }
 
 void
-setrunqueue(struct proc *p)
+setrunqueue(struct cpu_info *ci, struct proc *p, uint8_t prio)
 {
        struct schedstate_percpu *spc;
-       int queue = p->p_priority >> 2;
+       int queue = prio >> 2;
+
+       if (ci == NULL)
+               ci = sched_choosecpu(p);
 
+       KASSERT(ci != NULL);
        SCHED_ASSERT_LOCKED();
+
+       p->p_cpu = ci;
+       p->p_stat = SRUN;
+       p->p_priority = prio;
+
        spc = &p->p_cpu->ci_schedstate;
        spc->spc_nrun++;
 
@@ -294,8 +303,7 @@ sched_chooseproc(void)
                        for (queue = 0; queue < SCHED_NQS; queue++) {
                                while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
                                        remrunqueue(p);
-                                       p->p_cpu = sched_choosecpu(p);
-                                       setrunqueue(p);
+                                       setrunqueue(NULL, p, p->p_priority);
                                        if (p->p_cpu == curcpu()) {
                                                KASSERT(p->p_flag & P_CPUPEG);
                                                goto again;
@@ -317,7 +325,8 @@ again:
                p = TAILQ_FIRST(&spc->spc_qs[queue]);
                remrunqueue(p);
                sched_noidle++;
-               KASSERT(p->p_stat == SRUN);
+               if (p->p_stat != SRUN)
+                       panic("thread %d not in SRUN: %d", p->p_tid, p->p_stat);
        } else if ((p = sched_steal_proc(curcpu())) == NULL) {
                p = spc->spc_idleproc;
                if (p == NULL) {
@@ -610,11 +619,8 @@ sched_peg_curproc(struct cpu_info *ci)
        int s;
 
        SCHED_LOCK(s);
-       p->p_priority = p->p_usrpri;
-       p->p_stat = SRUN;
-       p->p_cpu = ci;
        atomic_setbits_int(&p->p_flag, P_CPUPEG);
-       setrunqueue(p);
+       setrunqueue(ci, p, p->p_usrpri);
        p->p_ru.ru_nvcsw++;
        mi_switch();
        SCHED_UNLOCK(s);
Index: kern/kern_synch.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_synch.c,v
retrieving revision 1.152
diff -u -p -r1.152 kern_synch.c
--- kern/kern_synch.c   1 Oct 2019 15:08:27 -0000       1.152
+++ kern/kern_synch.c   5 Oct 2019 10:12:57 -0000
@@ -546,6 +546,7 @@ int
 sys_sched_yield(struct proc *p, void *v, register_t *retval)
 {
        struct proc *q;
+       uint8_t newprio;
        int s;
 
        SCHED_LOCK(s);
@@ -554,11 +555,10 @@ sys_sched_yield(struct proc *p, void *v,
         * sched_yield(2), drop its priority to ensure its siblings
         * can make some progress.
         */
-       p->p_priority = p->p_usrpri;
+       newprio = p->p_usrpri;
        TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
-               p->p_priority = max(p->p_priority, q->p_priority);
-       p->p_stat = SRUN;
-       setrunqueue(p);
+               newprio = max(newprio, q->p_priority);
+       setrunqueue(p->p_cpu, p, newprio);
        p->p_ru.ru_nvcsw++;
        mi_switch();
        SCHED_UNLOCK(s);
Index: kern/sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.55
diff -u -p -r1.55 sched_bsd.c
--- kern/sched_bsd.c    15 Jul 2019 20:44:48 -0000      1.55
+++ kern/sched_bsd.c    5 Oct 2019 13:01:44 -0000
@@ -261,8 +261,7 @@ schedcpu(void *arg)
                            (p->p_priority / SCHED_PPQ) !=
                            (p->p_usrpri / SCHED_PPQ)) {
                                remrunqueue(p);
-                               p->p_priority = p->p_usrpri;
-                               setrunqueue(p);
+                               setrunqueue(p->p_cpu, p, p->p_usrpri);
                        } else
                                p->p_priority = p->p_usrpri;
                }
@@ -310,9 +309,7 @@ yield(void)
        NET_ASSERT_UNLOCKED();
 
        SCHED_LOCK(s);
-       p->p_priority = p->p_usrpri;
-       p->p_stat = SRUN;
-       setrunqueue(p);
+       setrunqueue(p->p_cpu, p, p->p_usrpri);
        p->p_ru.ru_nvcsw++;
        mi_switch();
        SCHED_UNLOCK(s);
@@ -331,9 +328,7 @@ preempt(void)
        int s;
 
        SCHED_LOCK(s);
-       p->p_priority = p->p_usrpri;
-       p->p_stat = SRUN;
-       setrunqueue(p);
+       setrunqueue(p->p_cpu, p, p->p_usrpri);
        p->p_ru.ru_nivcsw++;
        mi_switch();
        SCHED_UNLOCK(s);
@@ -495,9 +490,7 @@ setrunnable(struct proc *p)
                unsleep(p);             /* e.g. when sending signals */
                break;
        }
-       p->p_stat = SRUN;
-       p->p_cpu = sched_choosecpu(p);
-       setrunqueue(p);
+       setrunqueue(NULL, p, p->p_priority);
        if (p->p_slptime > 1) {
                uint32_t newcpu;
 
Index: sys/sched.h
===================================================================
RCS file: /cvs/src/sys/sys/sched.h,v
retrieving revision 1.54
diff -u -p -r1.54 sched.h
--- sys/sched.h 15 Jul 2019 20:44:48 -0000      1.54
+++ sys/sched.h 5 Oct 2019 12:58:41 -0000
@@ -179,7 +179,7 @@ void sched_stop_secondary_cpus(void);
 int    cpu_is_online(struct cpu_info *);
 
 void sched_init_runqueues(void);
-void setrunqueue(struct proc *);
+void setrunqueue(struct cpu_info *, struct proc *, uint8_t);
 void remrunqueue(struct proc *);
 
 /* Inherit the parent's scheduler history */
