This patch switches sparc64 to clockintr(9). mlarkin@ has tested it in an LDOM on the T4-1 in his garage. It survived a parallel release build and an upgrade from the resulting bsd.rd.
This needs more testing.  Testing on a machine without %SYS_TICK or
%SYS_TICK_COMPARE (UltraSPARC I?) would be helpful.  Testing on the
UltraSPARC IIe ("Hummingbird") would also be helpful.  Apparently it has
%SYS_TICK and %SYS_TICK_COMPARE, but in an unusual hardware configuration.
I imagine this machine is a bit rare, though.

I am under the impression that we only support sparc64 machines with the
%TICK_COMPARE register.  If this is not the case, and we actually support
the early HAL CPUs without %TICK_COMPARE, please speak up.  They aren't
listed here:

https://www.openbsd.org/sparc64.html

Notes:

- All sparc64 machines now have a randomized statclock().

- This patch disables timer(4/sparc64).  If the device has no other
  utility we can remove the driver in a separate patch.

Index: sys/arch/sparc64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/include/cpu.h,v
retrieving revision 1.100
diff -u -p -r1.100 cpu.h
--- sys/arch/sparc64/include/cpu.h	22 Oct 2022 20:09:41 -0000	1.100
+++ sys/arch/sparc64/include/cpu.h	6 Nov 2022 19:25:21 -0000
@@ -78,6 +78,7 @@
 #include <machine/psl.h>
 #include <machine/reg.h>
 
+#include <sys/clockintr.h>
 #include <sys/sched.h>
 #include <sys/srp.h>
 
@@ -129,7 +130,7 @@ struct cpu_info {
 	int ci_want_resched;
 	int ci_handled_intr_level;
 	void *ci_intrpending[16][8];
-	u_int64_t ci_tick;
+	struct clockintr_queue ci_queue;
 	struct intrhand ci_tickintr;
 
 	volatile int ci_ddb_paused;
Index: sys/arch/sparc64/include/_types.h
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/include/_types.h,v
retrieving revision 1.23
diff -u -p -r1.23 _types.h
--- sys/arch/sparc64/include/_types.h	5 Mar 2018 01:15:25 -0000	1.23
+++ sys/arch/sparc64/include/_types.h	6 Nov 2022 19:25:21 -0000
@@ -35,6 +35,8 @@
 #ifndef _MACHINE__TYPES_H_
 #define _MACHINE__TYPES_H_
 
+#define __HAVE_CLOCKINTR
+
 #if defined(_KERNEL)
 typedef struct label_t {
 	long val[2];
Index: sys/arch/sparc64/sparc64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/sparc64/clock.c,v
retrieving revision 1.71
diff -u -p -r1.71 clock.c
--- sys/arch/sparc64/sparc64/clock.c	24 Oct 2021 17:05:04 -0000	1.71
+++ sys/arch/sparc64/sparc64/clock.c	6 Nov 2022 19:25:21 -0000
@@ -65,6 +65,7 @@
 
 #include <sys/param.h>
 #include <sys/kernel.h>
+#include <sys/clockintr.h>
 #include <sys/device.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
@@ -74,6 +75,7 @@
 #include <sys/gmon.h>
 #endif
 #include <sys/sched.h>
+#include <sys/stdint.h>
 #include <sys/timetc.h>
 #include <sys/atomic.h>
 
@@ -132,19 +134,35 @@ struct timecounter sys_tick_timecounter
 	.tc_user = TC_SYS_TICK,
 };
 
-/*
- * Statistics clock interval and variance, in usec.  Variance must be a
- * power of two.  Since this gives us an even number, not an odd number,
- * we discard one case and compensate.  That is, a variance of 1024 would
- * give us offsets in [0..1023].  Instead, we take offsets in [1..1023].
- * This is symmetric about the point 512, or statvar/2, and thus averages
- * to that value (assuming uniform random numbers).
- */
-/* XXX fix comment to match value */
-int statvar = 8192;
-int statmin;			/* statclock interval - 1/2*variance */
+uint64_t tick_nsec_cycle_ratio;
+uint64_t tick_nsec_max;
+
+void tick_rearm(void *, uint64_t);
+void tick_trigger(void *);
+
+const struct intrclock tick_intrclock = {
+	.ic_rearm = tick_rearm,
+	.ic_trigger = tick_trigger
+};
+
+uint64_t sys_tick_nsec_cycle_ratio;
+uint64_t sys_tick_nsec_max;
 
-static long tick_increment;
+void sys_tick_rearm(void *, uint64_t);
+void sys_tick_trigger(void *);
+
+const struct intrclock sys_tick_intrclock = {
+	.ic_rearm = sys_tick_rearm,
+	.ic_trigger = sys_tick_trigger
+};
+
+void stick_rearm(void *, uint64_t);
+void stick_trigger(void *);
+
+const struct intrclock stick_intrclock = {
+	.ic_rearm = stick_rearm,
+	.ic_trigger = stick_trigger
+};
 
 void tick_start(void);
 void sys_tick_start(void);
@@ -153,12 +171,8 @@ void stick_start(void);
 int tickintr(void *);
 int sys_tickintr(void *);
 int stickintr(void *);
-int schedintr(void *);
 
-static struct intrhand level10 = { clockintr };
 static struct intrhand level0 = { tickintr };
-static struct intrhand level14 = { statintr };
-static struct intrhand schedint = { schedintr };
 
 /*
  * clock (eeprom) attaches at the sbus or the ebus (PCI)
@@ -473,21 +487,7 @@ timerattach(struct device *parent, struc
 	timerreg_4u.t_clrintr = (int64_t *)(u_long)va[1];
 	timerreg_4u.t_mapintr = (int64_t *)(u_long)va[2];
 
-	/* Install the appropriate interrupt vector here */
-	level10.ih_number = INTVEC(ma->ma_interrupts[0]);
-	level10.ih_clr = (void *)&timerreg_4u.t_clrintr[0];
-	level10.ih_map = (void *)&timerreg_4u.t_mapintr[0];
-	strlcpy(level10.ih_name, "clock", sizeof(level10.ih_name));
-	intr_establish(10, &level10);
-
-	level14.ih_number = INTVEC(ma->ma_interrupts[1]);
-	level14.ih_clr = (void *)&timerreg_4u.t_clrintr[1];
-	level14.ih_map = (void *)&timerreg_4u.t_mapintr[1];
-	strlcpy(level14.ih_name, "prof", sizeof(level14.ih_name));
-	intr_establish(14, &level14);
-
-	printf(" ivec 0x%llx, 0x%llx\n", INTVEC(level10.ih_number),
-	    INTVEC(level14.ih_number));
+	printf(" ivec (none)\n");
 }
 
 void
@@ -538,7 +538,7 @@ myetheraddr(u_char *cp)
 void
 cpu_initclocks(void)
 {
-	int statint, minint;
+	struct cpu_info *ci;
 #ifdef DEBUG
 	extern int intrdebug;
 #endif
@@ -562,6 +562,14 @@ cpu_initclocks(void)
 		tick_nsec = 1000000000 / hz;
 	}
 
+	if (stathz == 0)
+		stathz = hz;
+	if (1000000 % stathz) {
+		printf("cannot get %d Hz statclock; using 100 Hz\n", stathz);
+		stathz = 100;
+	}
+	profhz = stathz * 10;
+
 	/* Make sure we have a sane cpu_clockrate -- we'll need it */
 	if (!cpu_clockrate)
 		/* Default to 200MHz clock XXXXX */
@@ -570,6 +578,9 @@ cpu_initclocks(void)
 	tick_timecounter.tc_frequency = cpu_clockrate;
 	tc_init(&tick_timecounter);
 
+	tick_nsec_cycle_ratio = cpu_clockrate * (1ULL << 32) / 1000000000;
+	tick_nsec_max = UINT64_MAX / tick_nsec_cycle_ratio;
+
 	/*
 	 * UltraSPARC IIe processors do have a STICK register, but it
 	 * lives on the PCI host bridge and isn't accessible through
@@ -582,159 +593,49 @@ cpu_initclocks(void)
 	if (sys_tick_rate > 0 && impl != IMPL_HUMMINGBIRD) {
 		sys_tick_timecounter.tc_frequency = sys_tick_rate;
 		tc_init(&sys_tick_timecounter);
-	}
 
-	/*
-	 * Now handle machines w/o counter-timers.
-	 */
-
-	if (!timerreg_4u.t_timer || !timerreg_4u.t_clrintr) {
-		struct cpu_info *ci;
-
-		/* We don't have a counter-timer -- use %tick */
-		level0.ih_clr = 0;
-
-		/*
-		 * Establish a level 10 interrupt handler
-		 *
-		 * We will have a conflict with the softint handler,
-		 * so we set the ih_number to 1.
-		 */
-		level0.ih_number = 1;
-		strlcpy(level0.ih_name, "clock", sizeof(level0.ih_name));
-		intr_establish(10, &level0);
-
-		/* We only have one timer so we have no statclock */
-		stathz = 0;
-
-		if (sys_tick_rate > 0) {
-			tick_increment = sys_tick_rate / hz;
-			if (impl == IMPL_HUMMINGBIRD) {
-				level0.ih_fun = stickintr;
-				cpu_start_clock = stick_start;
-			} else {
-				level0.ih_fun = sys_tickintr;
-				cpu_start_clock = sys_tick_start;
-			}
-		} else {
-			/* set the next interrupt time */
-			tick_increment = cpu_clockrate / hz;
-			level0.ih_fun = tickintr;
-			cpu_start_clock = tick_start;
-		}
-
-		for (ci = cpus; ci != NULL; ci = ci->ci_next)
-			memcpy(&ci->ci_tickintr, &level0, sizeof(level0));
-
-		cpu_start_clock();
-
-		return;
+		sys_tick_nsec_cycle_ratio =
+		    sys_tick_rate * (1ULL << 32) / 1000000000;
+		sys_tick_nsec_max = UINT64_MAX / sys_tick_nsec_cycle_ratio;
 	}
 
-	if (stathz == 0)
-		stathz = hz;
-	if (1000000 % stathz) {
-		printf("cannot get %d Hz statclock; using 100 Hz\n", stathz);
-		stathz = 100;
-	}
+	clockintr_init(CL_RNDSTAT);
 
-	profhz = stathz;	/* always */
-
-	statint = 1000000 / stathz;
-	minint = statint / 2 + 100;
-	while (statvar > minint)
-		statvar >>= 1;
+	level0.ih_clr = 0;
 
 	/*
-	 * Establish scheduler softint.
-	 */
-	schedint.ih_pil = PIL_SCHED;
-	schedint.ih_clr = NULL;
-	schedint.ih_arg = 0;
-	schedint.ih_pending = 0;
-	schedhz = stathz/4;
-
-	/*
-	 * Enable timers
+	 * Establish a level 10 interrupt handler
 	 *
-	 * Also need to map the interrupts cause we're not a child of the sbus.
-	 * N.B. By default timer[0] is disabled and timer[1] is enabled.
+	 * We will have a conflict with the softint handler,
+	 * so we set the ih_number to 1.
 	 */
-	stxa((vaddr_t)&timerreg_4u.t_timer[0].t_limit, ASI_NUCLEUS,
-	    tmr_ustolim(tick)|TMR_LIM_IEN|TMR_LIM_PERIODIC|TMR_LIM_RELOAD);
-	stxa((vaddr_t)&timerreg_4u.t_mapintr[0], ASI_NUCLEUS,
-	    timerreg_4u.t_mapintr[0]|INTMAP_V);
-
-#ifdef DEBUG
-	if (intrdebug)
-		/* Neglect to enable timer */
-		stxa((vaddr_t)&timerreg_4u.t_timer[1].t_limit, ASI_NUCLEUS,
-		    tmr_ustolim(statint)|TMR_LIM_RELOAD);
-	else
-#endif
-		stxa((vaddr_t)&timerreg_4u.t_timer[1].t_limit, ASI_NUCLEUS,
-		    tmr_ustolim(statint)|TMR_LIM_IEN|TMR_LIM_RELOAD);
-	stxa((vaddr_t)&timerreg_4u.t_mapintr[1], ASI_NUCLEUS,
-	    timerreg_4u.t_mapintr[1]|INTMAP_V);
+	level0.ih_number = 1;
+	strlcpy(level0.ih_name, "clock", sizeof(level0.ih_name));
+	intr_establish(10, &level0);
+
+	if (sys_tick_rate > 0) {
+		if (impl == IMPL_HUMMINGBIRD) {
+			level0.ih_fun = stickintr;
+			cpu_start_clock = stick_start;
+		} else {
+			level0.ih_fun = sys_tickintr;
+			cpu_start_clock = sys_tick_start;
+		}
+	} else {
+		level0.ih_fun = tickintr;
+		cpu_start_clock = tick_start;
+	}
 
-	statmin = statint - (statvar >> 1);
+	for (ci = cpus; ci != NULL; ci = ci->ci_next)
+		memcpy(&ci->ci_tickintr, &level0, sizeof(level0));
 
-	tick_enable();
+	cpu_start_clock();
 }
 
-/*
- * Dummy setstatclockrate(), since we know profhz==hz.
- */
 void
 setstatclockrate(int newhz)
 {
-	/* nothing */
-}
-
-/*
- * Level 10 (clock) interrupts.  If we are using the FORTH PROM for
- * console input, we need to check for that here as well, and generate
- * a software interrupt to read it.
- */
-#ifdef DEBUG
-static int clockcheck = 0;
-#endif
-int
-clockintr(void *cap)
-{
-#ifdef DEBUG
-	static int64_t tick_base = 0;
-	struct timeval ctime;
-	int64_t t;
-
-	t = tick() & TICK_TICKS;
-
-	microtime(&ctime);
-	if (!tick_base) {
-		tick_base = (ctime.tv_sec * 1000000LL + ctime.tv_usec)
-		    * 1000000LL / cpu_clockrate;
-		tick_base -= t;
-	} else if (clockcheck) {
-		int64_t tk = t;
-		int64_t clk = (ctime.tv_sec * 1000000LL + ctime.tv_usec);
-		t -= tick_base;
-		t = t * 1000000LL / cpu_clockrate;
-		if (t - clk > hz) {
-			printf("Clock lost an interrupt!\n");
-			printf("Actual: %llx Expected: %llx tick %llx "
-			    "tick_base %llx\n", (long long)t, (long long)clk,
-			    (long long)tk, (long long)tick_base);
-#ifdef DDB
-			db_enter();
-#endif
-			tick_base = 0;
-		}
-	}
-#endif
-	/* Let locore.s clear the interrupt for us. */
-	hardclock((struct clockframe *)cap);
-
-	return (1);
+	clockintr_setstatclockrate(newhz);
 }
 
 /*
@@ -748,180 +649,64 @@ clockintr(void *cap)
 int
 tickintr(void *cap)
 {
-	struct cpu_info *ci = curcpu();
-	u_int64_t s;
-
-	/*
-	 * No need to worry about overflow; %tick is architecturally
-	 * defined not to do that for at least 10 years.
-	 */
-	while (ci->ci_tick < tick()) {
-		ci->ci_tick += tick_increment;
-		hardclock((struct clockframe *)cap);
-		atomic_add_long((unsigned long *)&level0.ih_count.ec_count, 1);
-	}
-
-	/* Reset the interrupt. */
-	s = intr_disable();
-	tickcmpr_set(ci->ci_tick);
-	intr_restore(s);
-
+	clockintr_dispatch(cap);
+	atomic_inc_long((unsigned long *)&level0.ih_count.ec_count);
 	return (1);
 }
 
 int
 sys_tickintr(void *cap)
 {
-	struct cpu_info *ci = curcpu();
-	u_int64_t s;
-
-	/*
-	 * Do we need to worry about overflow here?
-	 */
-	while (ci->ci_tick < sys_tick()) {
-		ci->ci_tick += tick_increment;
-		hardclock((struct clockframe *)cap);
-		atomic_add_long((unsigned long *)&level0.ih_count.ec_count, 1);
-	}
-
-	/* Reset the interrupt. */
-	s = intr_disable();
-	sys_tickcmpr_set(ci->ci_tick);
-	intr_restore(s);
-
+	clockintr_dispatch(cap);
+	atomic_inc_long((unsigned long *)&level0.ih_count.ec_count);
 	return (1);
 }
 
 int
 stickintr(void *cap)
 {
-	struct cpu_info *ci = curcpu();
-	u_int64_t s;
-
-	/*
-	 * Do we need to worry about overflow here?
-	 */
-	while (ci->ci_tick < stick()) {
-		ci->ci_tick += tick_increment;
-		hardclock((struct clockframe *)cap);
-		atomic_add_long((unsigned long *)&level0.ih_count.ec_count, 1);
-	}
-
-	/* Reset the interrupt. */
-	s = intr_disable();
-	stickcmpr_set(ci->ci_tick);
-	intr_restore(s);
-
-	return (1);
-}
-
-/*
- * Level 14 (stat clock) interrupt handler.
- */
-int
-statintr(void *cap)
-{
-	u_long newint, r, var;
-	struct cpu_info *ci = curcpu();
-
-#ifdef NOT_DEBUG
-	printf("statclock: count %x:%x, limit %x:%x\n",
-	    timerreg_4u.t_timer[1].t_count, timerreg_4u.t_timer[1].t_limit);
-#endif
-#ifdef NOT_DEBUG
-	prom_printf("!");
-#endif
-	statclock((struct clockframe *)cap);
-#ifdef NOTDEF_DEBUG
-	/* Don't re-schedule the IRQ */
-	return 1;
-#endif
-	/*
-	 * Compute new randomized interval.  The intervals are uniformly
-	 * distributed on [statint - statvar / 2, statint + statvar / 2],
-	 * and therefore have mean statint, giving a stathz frequency clock.
-	 */
-	var = statvar;
-	do {
-		r = random() & (var - 1);
-	} while (r == 0);
-	newint = statmin + r;
-
-	if (schedhz)
-		if ((++ci->ci_schedstate.spc_schedticks & 3) == 0)
-			send_softint(-1, PIL_SCHED, &schedint);
-	stxa((vaddr_t)&timerreg_4u.t_timer[1].t_limit, ASI_NUCLEUS,
-	    tmr_ustolim(newint)|TMR_LIM_IEN|TMR_LIM_RELOAD);
-
-	return (1);
-}
-
-int
-schedintr(void *arg)
-{
-	if (curproc)
-		schedclock(curproc);
+	clockintr_dispatch(cap);
+	atomic_inc_long((unsigned long *)&level0.ih_count.ec_count);
 	return (1);
 }
 
 void
 tick_start(void)
 {
-	struct cpu_info *ci = curcpu();
 	u_int64_t s;
 
-	tick_enable();
+	clockintr_cpu_init(&tick_intrclock);
 
-	/*
-	 * Try to make the tick interrupts as synchronously as possible on
-	 * all CPUs to avoid inaccuracies for migrating processes.
-	 */
+	tick_enable();
 
-	s = intr_disable();
-	ci->ci_tick = roundup(tick(), tick_increment);
-	tickcmpr_set(ci->ci_tick);
-	intr_restore(s);
+	clockintr_trigger();
 }
 
 void
 sys_tick_start(void)
 {
-	struct cpu_info *ci = curcpu();
 	u_int64_t s;
 
+	clockintr_cpu_init(&sys_tick_intrclock);
+
 	if (CPU_ISSUN4U || CPU_ISSUN4US) {
 		tick_enable();
 		sys_tick_enable();
 	}
 
-	/*
-	 * Try to make the tick interrupts as synchronously as possible on
-	 * all CPUs to avoid inaccuracies for migrating processes.
-	 */
-
-	s = intr_disable();
-	ci->ci_tick = roundup(sys_tick(), tick_increment);
-	sys_tickcmpr_set(ci->ci_tick);
-	intr_restore(s);
+	clockintr_trigger();
 }
 
 void
 stick_start(void)
 {
-	struct cpu_info *ci = curcpu();
 	u_int64_t s;
 
-	tick_enable();
+	clockintr_cpu_init(&stick_intrclock);
 
-	/*
-	 * Try to make the tick interrupts as synchronously as possible on
-	 * all CPUs to avoid inaccuracies for migrating processes.
	 */
+	tick_enable();
 
-	s = intr_disable();
-	ci->ci_tick = roundup(stick(), tick_increment);
-	stickcmpr_set(ci->ci_tick);
-	intr_restore(s);
+	clockintr_trigger();
 }
 
 u_int
@@ -942,4 +727,76 @@ sys_tick_get_timecount(struct timecounte
 	__asm volatile("rd %%sys_tick, %0" : "=r" (tick));
 
 	return (tick & ~0u);
+}
+
+void
+tick_rearm(void *unused, uint64_t nsecs)
+{
+	uint64_t s;
+	uint32_t cycles;
+
+	if (nsecs > tick_nsec_max)
+		nsecs = tick_nsec_max;
+	cycles = (nsecs * tick_nsec_cycle_ratio) >> 32;
+	s = intr_disable();
+	tickcmpr_set(tick() + cycles);
+	intr_restore(s);
+}
+
+void
+tick_trigger(void *unused)
+{
+	uint64_t s;
+
+	s = intr_disable();
+	tickcmpr_set(tick());
+	intr_restore(s);
+}
+
+void
+sys_tick_rearm(void *unused, uint64_t nsecs)
+{
+	uint64_t s;
+	uint32_t cycles;
+
+	if (nsecs > sys_tick_nsec_max)
+		nsecs = sys_tick_nsec_max;
+	cycles = (nsecs * sys_tick_nsec_cycle_ratio) >> 32;
+	s = intr_disable();
+	sys_tickcmpr_set(sys_tick() + cycles);
+	intr_restore(s);
+}
+
+void
+sys_tick_trigger(void *unused)
+{
+	uint64_t s;
+
+	s = intr_disable();
+	sys_tickcmpr_set(sys_tick());
+	intr_restore(s);
+}
+
+void
+stick_rearm(void *unused, uint64_t nsecs)
+{
+	uint64_t s;
+	uint32_t cycles;
+
+	if (nsecs > sys_tick_nsec_max)
+		nsecs = sys_tick_nsec_max;
+	cycles = (nsecs * sys_tick_nsec_cycle_ratio) >> 32;
+	s = intr_disable();
+	stickcmpr_set(stick() + cycles);
+	intr_restore(s);
+}
+
+void
+stick_trigger(void *unused)
+{
+	uint64_t s;
+
+	s = intr_disable();
+	stickcmpr_set(stick());
+	intr_restore(s);
 }
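
For reviewers who want to sanity-check the conversion math: the rearm
functions turn a nanosecond interval into timer cycles using a 32.32
fixed-point ratio precomputed in cpu_initclocks().  Below is a small
standalone userland sketch of that arithmetic.  It is not part of the
diff, and the 400 MHz clock rate is made up for the example:

/*
 * Standalone sketch of the nsec -> cycles conversion used by
 * tick_rearm() and friends.  The clock rate here is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t clockrate = 400000000;	/* pretend %tick runs at 400 MHz */
	uint64_t ratio, nsec_max, nsecs;
	uint32_t cycles;

	/* Cycles per nanosecond in 32.32 fixed point (cpu_initclocks()). */
	ratio = clockrate * (1ULL << 32) / 1000000000;

	/* Largest interval that keeps the 64-bit multiply from overflowing. */
	nsec_max = UINT64_MAX / ratio;

	/* Convert a 10 ms deadline to cycles, as tick_rearm() does. */
	nsecs = 10000000;
	if (nsecs > nsec_max)
		nsecs = nsec_max;
	cycles = (nsecs * ratio) >> 32;

	printf("%u cycles\n", cycles);	/* about 4000000 at 400 MHz */
	return 0;
}

The clamp against nsec_max (roughly 10.7 seconds at 400 MHz) just means
an overly long request fires early and gets rearmed on a later dispatch,
so nothing is lost.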