> Date: Mon, 12 Jun 2023 19:09:59 -0500 > From: Scott Cheloha <scottchel...@gmail.com> > > We need to initialize the per-CPU clockintr_queue struct before we can > call clockintr_establish() from sched_init_cpu(). > > Initialization is done with a call to clockqueue_init(). Currently we > call it during clockintr_cpu_init(), i.e. each CPU initializes its own > clockintr_queue struct. > > This patch moves the clockqueue_init() call out into main() and out > into the MD code, just before sched_init_cpu(). So, now the primary > CPU initializes the clockintr_queue struct on behalf of the secondary > CPUs. > > No behavior change. > > With this in place, we can start breaking pieces off of the > hardclock() and statclock() in the next patch. > > ok?
Maybe we should consider having an MI "initialize data structures for this CPU" function at some point. But having this next to sched_init_cpu() is fine for now. ok kettenis@ > Index: kern/init_main.c > =================================================================== > RCS file: /cvs/src/sys/kern/init_main.c,v > retrieving revision 1.320 > diff -u -p -r1.320 init_main.c > --- kern/init_main.c 1 Jan 2023 07:00:51 -0000 1.320 > +++ kern/init_main.c 12 Jun 2023 23:55:43 -0000 > @@ -47,6 +47,7 @@ > #include <sys/resourcevar.h> > #include <sys/signalvar.h> > #include <sys/systm.h> > +#include <sys/clockintr.h> > #include <sys/namei.h> > #include <sys/vnode.h> > #include <sys/tty.h> > @@ -313,6 +314,7 @@ main(void *framep) > /* Initialize run queues */ > sched_init_runqueues(); > sleep_queue_init(); > + clockqueue_init(&curcpu()->ci_queue); > sched_init_cpu(curcpu()); > p->p_cpu->ci_randseed = (arc4random() & 0x7fffffff) + 1; > > Index: kern/kern_clockintr.c > =================================================================== > RCS file: /cvs/src/sys/kern/kern_clockintr.c,v > retrieving revision 1.21 > diff -u -p -r1.21 kern_clockintr.c > --- kern/kern_clockintr.c 23 Apr 2023 00:08:36 -0000 1.21 > +++ kern/kern_clockintr.c 12 Jun 2023 23:55:43 -0000 > @@ -66,7 +66,6 @@ void clockintr_schedule(struct clockintr > void clockintr_schedule_locked(struct clockintr *, uint64_t); > void clockintr_statclock(struct clockintr *, void *); > void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *); > -void clockqueue_init(struct clockintr_queue *); > uint64_t clockqueue_next(const struct clockintr_queue *); > void clockqueue_reset_intrclock(struct clockintr_queue *); > uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t); > @@ -114,7 +113,6 @@ clockintr_cpu_init(const struct intrcloc > > KASSERT(ISSET(clockintr_flags, CL_INIT)); > > - clockqueue_init(cq); > if (ic != NULL && !ISSET(cq->cq_flags, CQ_INTRCLOCK)) { > cq->cq_intrclock = *ic; > SET(cq->cq_flags, 
CQ_INTRCLOCK); > Index: sys/clockintr.h > =================================================================== > RCS file: /cvs/src/sys/sys/clockintr.h,v > retrieving revision 1.7 > diff -u -p -r1.7 clockintr.h > --- sys/clockintr.h 20 Apr 2023 14:51:28 -0000 1.7 > +++ sys/clockintr.h 12 Jun 2023 23:55:43 -0000 > @@ -129,6 +129,7 @@ void clockintr_trigger(void); > * Kernel API > */ > > +void clockqueue_init(struct clockintr_queue *); > int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t); > > #endif /* _KERNEL */ > Index: arch/alpha/alpha/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/alpha/alpha/cpu.c,v > retrieving revision 1.46 > diff -u -p -r1.46 cpu.c > --- arch/alpha/alpha/cpu.c 10 Dec 2022 15:02:29 -0000 1.46 > +++ arch/alpha/alpha/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -597,6 +597,7 @@ cpu_hatch(struct cpu_info *ci) > ALPHA_TBIA(); > alpha_pal_imb(); > > + clockqueue_init(&ci->ci_queue); > KERNEL_LOCK(); > sched_init_cpu(ci); > nanouptime(&ci->ci_schedstate.spc_runtime); > Index: arch/amd64/amd64/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/amd64/amd64/cpu.c,v > retrieving revision 1.168 > diff -u -p -r1.168 cpu.c > --- arch/amd64/amd64/cpu.c 24 Apr 2023 09:04:03 -0000 1.168 > +++ arch/amd64/amd64/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -664,6 +664,7 @@ cpu_attach(struct device *parent, struct > #if defined(MULTIPROCESSOR) > cpu_intr_init(ci); > cpu_start_secondary(ci); > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > ncpus++; > if (ci->ci_flags & CPUF_PRESENT) { > Index: arch/arm/arm/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/arm/arm/cpu.c,v > retrieving revision 1.57 > diff -u -p -r1.57 cpu.c > --- arch/arm/arm/cpu.c 12 Mar 2022 14:40:41 -0000 1.57 > +++ arch/arm/arm/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -391,6 +391,7 @@ cpu_attach(struct device 
*parent, struct > "cpu-release-addr", 0); > } > > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > if (cpu_hatch_secondary(ci, spinup_method, spinup_data)) { > atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFY); > Index: arch/arm64/arm64/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/arm64/arm64/cpu.c,v > retrieving revision 1.94 > diff -u -p -r1.94 cpu.c > --- arch/arm64/arm64/cpu.c 11 Jun 2023 21:42:01 -0000 1.94 > +++ arch/arm64/arm64/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -875,6 +875,7 @@ cpu_attach(struct device *parent, struct > "cpu-release-addr", 0); > } > > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > if (cpu_start_secondary(ci, spinup_method, spinup_data)) { > atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFY); > Index: arch/hppa/dev/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/hppa/dev/cpu.c,v > retrieving revision 1.44 > diff -u -p -r1.44 cpu.c > --- arch/hppa/dev/cpu.c 6 Dec 2022 00:40:09 -0000 1.44 > +++ arch/hppa/dev/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -202,6 +202,7 @@ cpu_boot_secondary_processors(void) > > ci->ci_randseed = (arc4random() & 0x7fffffff) + 1; > > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > > /* Release the specified CPU by triggering an EIR{0}. 
*/ > Index: arch/i386/i386/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/i386/i386/cpu.c,v > retrieving revision 1.111 > diff -u -p -r1.111 cpu.c > --- arch/i386/i386/cpu.c 30 Jan 2023 10:49:04 -0000 1.111 > +++ arch/i386/i386/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -360,6 +360,7 @@ cpu_attach(struct device *parent, struct > #endif > cpu_tsx_disable(ci); > identifycpu(ci); > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > ci->ci_next = cpu_info_list->ci_next; > cpu_info_list->ci_next = ci; > Index: arch/luna88k/luna88k/machdep.c > =================================================================== > RCS file: /cvs/src/sys/arch/luna88k/luna88k/machdep.c,v > retrieving revision 1.142 > diff -u -p -r1.142 machdep.c > --- arch/luna88k/luna88k/machdep.c 10 Dec 2022 02:41:56 -0000 1.142 > +++ arch/luna88k/luna88k/machdep.c 12 Jun 2023 23:55:43 -0000 > @@ -762,6 +762,7 @@ secondary_main() > cpu_configuration_print(0); > ncpus++; > > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > nanouptime(&ci->ci_schedstate.spc_runtime); > ci->ci_curproc = NULL; > Index: arch/macppc/macppc/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/macppc/macppc/cpu.c,v > retrieving revision 1.85 > diff -u -p -r1.85 cpu.c > --- arch/macppc/macppc/cpu.c 13 Mar 2022 12:33:01 -0000 1.85 > +++ arch/macppc/macppc/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -670,6 +670,7 @@ cpu_boot_secondary_processors(void) > continue; > ci->ci_randseed = (arc4random() & 0x7fffffff) + 1; > > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > > cpu_spinup(NULL, ci); > Index: arch/mips64/mips64/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/mips64/mips64/cpu.c,v > retrieving revision 1.82 > diff -u -p -r1.82 cpu.c > --- arch/mips64/mips64/cpu.c 6 Apr 2022 18:59:26 -0000 1.82 > +++ arch/mips64/mips64/cpu.c 12 Jun 2023 23:55:43 
-0000 > @@ -395,6 +395,7 @@ cpu_boot_secondary_processors(void) > continue; > > ci->ci_randseed = (arc4random() & 0x7fffffff) + 1; > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > cpu_boot_secondary(ci); > } > Index: arch/powerpc64/powerpc64/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/powerpc64/powerpc64/cpu.c,v > retrieving revision 1.25 > diff -u -p -r1.25 cpu.c > --- arch/powerpc64/powerpc64/cpu.c 25 Jan 2023 09:53:53 -0000 1.25 > +++ arch/powerpc64/powerpc64/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -189,6 +189,7 @@ cpu_attach(struct device *parent, struct > if (dev->dv_unit != 0) { > int timeout = 10000; > > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > ncpus++; > > Index: arch/riscv64/riscv64/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/riscv64/riscv64/cpu.c,v > retrieving revision 1.13 > diff -u -p -r1.13 cpu.c > --- arch/riscv64/riscv64/cpu.c 6 Apr 2022 18:59:27 -0000 1.13 > +++ arch/riscv64/riscv64/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -193,6 +193,7 @@ cpu_attach(struct device *parent, struct > if (ci->ci_flags & CPUF_AP) { > int timeout = 10000; > > + clockqueue_init(&ci->ci_queue); > sched_init_cpu(ci); > if (cpu_hatch_secondary(ci)) { > atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFY); > Index: arch/sparc64/sparc64/cpu.c > =================================================================== > RCS file: /cvs/src/sys/arch/sparc64/sparc64/cpu.c,v > retrieving revision 1.73 > diff -u -p -r1.73 cpu.c > --- arch/sparc64/sparc64/cpu.c 24 Oct 2021 17:05:04 -0000 1.73 > +++ arch/sparc64/sparc64/cpu.c 12 Jun 2023 23:55:43 -0000 > @@ -185,6 +185,7 @@ alloc_cpuinfo(struct mainbus_attach_args > cpi->ci_self = cpi; > cpi->ci_node = ma->ma_node; > > + clockqueue_init(&cpi->ci_queue); > sched_init_cpu(cpi); > > /* > >