On Tue, Aug 09, 2022 at 09:54:02AM -0500, Scott Cheloha wrote:
> On Tue, Aug 09, 2022 at 02:03:31PM +0000, Visa Hankala wrote:
> > On Mon, Aug 08, 2022 at 02:52:37AM -0500, Scott Cheloha wrote:
> > > One thing I'm still uncertain about is how glxclk fits into the
> > > loongson picture. It's an interrupt clock that runs hardclock() and
> > > statclock(), but the code doesn't do any logical masking, so I don't
> > > know whether or not I need to adjust anything in that code or account
> > > for it at all. If there's no logical masking there's no deferral, so
> > > it would never need to call md_triggerclock() from splx(9).
> >
> > I think the masking of glxclk interrupts is handled by the ISA
> > interrupt code.
>
> Do those machines not have Coprocessor 0? If they do, why would you
> prefer glxclk over CP0?
>
> > The patch misses the md_triggerclock definition in mips64_machdep.c.
>
> Whoops, forgot that file. Fuller patch below.
>
> > I have put this to the test on the mips64 ports builder machines.

The machines completed a build with this patch without problems.
I tested with the debug counters removed from cp0_trigger_int5().

OK visa@
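
For reference, the shape of the scheme, reduced to a sketch (the
function names int5_sketch() and splx_sketch() are illustrative
stand-ins for cp0_int5() and the per-controller splx() routines; the
real code is in the diff below):

/* INT5 handler: defer all work while the clock IPL is masked. */
uint32_t
int5_sketch(uint32_t mask, struct trapframe *tf)
{
	struct cpu_info *ci = curcpu();
	uint32_t pendingticks = 0;

	if (tf->ipl >= IPL_CLOCK) {
		/* Masked: push compare ~2^32 ticks out, retry from splx(9). */
		ci->ci_clock_deferred = 1;
		cp0_set_compare(cp0_get_count() - 1);
		return CR_INT_5;
	}
	ci->ci_clock_deferred = 0;

	/* ... accumulate elapsed ticks in pendingticks, rearm compare ... */

	while (pendingticks) {
		hardclock(tf);
		pendingticks--;
	}
	return CR_INT_5;
}

/* splx(9) path: run the postponed clock interrupt once it is unmasked. */
void
splx_sketch(int newipl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_ipl = newipl;
	/* ... update hardware interrupt masks for newipl ... */

	if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
		md_triggerclock();
}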
> Index: mips64/mips64/clock.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/mips64/mips64/clock.c,v
> retrieving revision 1.45
> diff -u -p -r1.45 clock.c
> --- mips64/mips64/clock.c 6 Apr 2022 18:59:26 -0000 1.45
> +++ mips64/mips64/clock.c 9 Aug 2022 14:48:47 -0000
> @@ -60,6 +60,7 @@ const struct cfattach clock_ca = {
> };
>
> void cp0_startclock(struct cpu_info *);
> +void cp0_trigger_int5(void);
> uint32_t cp0_int5(uint32_t, struct trapframe *);
>
> int
> @@ -86,19 +87,20 @@ clockattach(struct device *parent, struc
> cp0_set_compare(cp0_get_count() - 1);
>
> md_startclock = cp0_startclock;
> + md_triggerclock = cp0_trigger_int5;
> }
>
> /*
> * Interrupt handler for targets using the internal count register
> * as interval clock. Normally the system is run with the clock
> * interrupt always enabled. Masking is done here and if the clock
> - * can not be run the tick is just counted and handled later when
> - * the clock is logically unmasked again.
> + * cannot be run the tick is handled later when the clock is logically
> + * unmasked again.
> */
> uint32_t
> cp0_int5(uint32_t mask, struct trapframe *tf)
> {
> - u_int32_t clkdiff;
> + u_int32_t clkdiff, pendingticks = 0;
> struct cpu_info *ci = curcpu();
>
> /*
> @@ -113,15 +115,26 @@ cp0_int5(uint32_t mask, struct trapframe
> }
>
> /*
> + * If the clock interrupt is masked, defer any work until it
> + * is unmasked from splx(9).
> + */
> + if (tf->ipl >= IPL_CLOCK) {
> + ci->ci_clock_deferred = 1;
> + cp0_set_compare(cp0_get_count() - 1);
> + return CR_INT_5;
> + }
> + ci->ci_clock_deferred = 0;
> +
> + /*
> * Count how many ticks have passed since the last clock interrupt...
> */
> clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
> while (clkdiff >= ci->ci_cpu_counter_interval) {
> ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
> clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
> - ci->ci_pendingticks++;
> + pendingticks++;
> }
> - ci->ci_pendingticks++;
> + pendingticks++;
> ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
>
> /*
> @@ -132,32 +145,64 @@ cp0_int5(uint32_t mask, struct trapframe
> clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
> if ((int)clkdiff >= 0) {
> ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
> - ci->ci_pendingticks++;
> + pendingticks++;
> cp0_set_compare(ci->ci_cpu_counter_last);
> }
>
> /*
> - * Process clock interrupt unless it is currently masked.
> + * Process clock interrupt.
> */
> - if (tf->ipl < IPL_CLOCK) {
> #ifdef MULTIPROCESSOR
> - register_t sr;
> + register_t sr;
>
> - sr = getsr();
> - ENABLEIPI();
> + sr = getsr();
> + ENABLEIPI();
> #endif
> - while (ci->ci_pendingticks) {
> - atomic_inc_long(
> - (unsigned long *)&cp0_clock_count.ec_count);
> - hardclock(tf);
> - ci->ci_pendingticks--;
> - }
> + while (pendingticks) {
> + atomic_inc_long((unsigned long *)&cp0_clock_count.ec_count);
> + hardclock(tf);
> + pendingticks--;
> + }
> #ifdef MULTIPROCESSOR
> - setsr(sr);
> + setsr(sr);
> #endif
> - }
>
> return CR_INT_5; /* Clock is always on 5 */
> +}
> +
> +unsigned long cp0_raise_calls, cp0_raise_miss;
> +
> +/*
> + * Trigger the clock interrupt.
> + *
> + * We need to spin until either (a) INT5 is pending or (b) the compare
> + * register leads the count register, i.e. we know INT5 will be pending
> + * very soon.
> + *
> + * To ensure we don't spin forever, double the compensatory offset
> + * added to the compare value every time we miss the count register.
> + */
> +void
> +cp0_trigger_int5(void)
> +{
> + uint32_t compare, offset = 16;
> + int leading = 0;
> + register_t sr;
> +
> + sr = disableintr();
> + while (!leading && !ISSET(cp0_get_cause(), CR_INT_5)) {
> + compare = cp0_get_count() + offset;
> + cp0_set_compare(compare);
> + leading = (int32_t)(compare - cp0_get_count()) > 0;
> + offset *= 2;
> + }
> + setsr(sr);
> +
> + unsigned long misses = 0;
> + for (; offset > 32; offset /= 2)
> + misses++;
> + atomic_add_long(&cp0_raise_miss, misses);
> + atomic_inc_long(&cp0_raise_calls);
> }
>
> /*
> Index: mips64/mips64/cp0access.S
> ===================================================================
> RCS file: /cvs/src/sys/arch/mips64/mips64/cp0access.S,v
> retrieving revision 1.23
> diff -u -p -r1.23 cp0access.S
> --- mips64/mips64/cp0access.S 1 May 2021 16:11:11 -0000 1.23
> +++ mips64/mips64/cp0access.S 9 Aug 2022 14:48:47 -0000
> @@ -198,3 +198,10 @@ LEAF(cpu_rnd_messybits, 0)
> j ra
> NOP
> END(cpu_rnd_messybits)
> +
> +LEAF(cp0_get_cause, 0)
> + MFC0 v0, COP_0_CAUSE_REG
> + MFC0_HAZARD
> + j ra
> + NOP
> +END(cp0_get_cause)
> Index: mips64/mips64/mips64_machdep.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/mips64/mips64/mips64_machdep.c,v
> retrieving revision 1.37
> diff -u -p -r1.37 mips64_machdep.c
> --- mips64/mips64/mips64_machdep.c 1 May 2021 16:11:11 -0000 1.37
> +++ mips64/mips64/mips64_machdep.c 9 Aug 2022 14:48:48 -0000
> @@ -219,6 +219,7 @@ tlb_asid_wrap(struct cpu_info *ci)
> */
>
> void (*md_startclock)(struct cpu_info *);
> +void (*md_triggerclock)(void);
>
> extern todr_chip_handle_t todr_handle;
>
> Index: mips64/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/mips64/include/cpu.h,v
> retrieving revision 1.138
> diff -u -p -r1.138 cpu.h
> --- mips64/include/cpu.h 28 Jan 2022 16:20:09 -0000 1.138
> +++ mips64/include/cpu.h 9 Aug 2022 14:48:48 -0000
> @@ -178,11 +178,10 @@ struct cpu_info {
> volatile int ci_ipl; /* software IPL */
> uint32_t ci_softpending; /* pending soft interrupts */
> int ci_clock_started;
> + volatile int ci_clock_deferred; /* clock interrupt postponed */
> u_int32_t ci_cpu_counter_last; /* last compare value loaded */
> u_int32_t ci_cpu_counter_interval; /* # of counter ticks/tick */
>
> - u_int32_t ci_pendingticks;
> -
> struct pmap *ci_curpmap;
> uint ci_intrdepth; /* interrupt depth */
> #ifdef MULTIPROCESSOR
> @@ -258,6 +257,7 @@ void smp_rendezvous_cpus(unsigned long,
> #define CPU_BUSY_CYCLE() do {} while (0)
>
> extern void (*md_startclock)(struct cpu_info *);
> +extern void (*md_triggerclock)(void);
> void cp0_calibrate(struct cpu_info *);
>
> unsigned int cpu_rnd_messybits(void);
> @@ -447,6 +447,7 @@ register_t disableintr(void);
> register_t getsr(void);
> register_t setsr(register_t);
>
> +uint32_t cp0_get_cause(void);
> u_int cp0_get_count(void);
> register_t cp0_get_config(void);
> uint32_t cp0_get_config_1(void);
> Index: octeon/dev/octcit.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/octeon/dev/octcit.c,v
> retrieving revision 1.12
> diff -u -p -r1.12 octcit.c
> --- octeon/dev/octcit.c 1 Sep 2019 12:16:01 -0000 1.12
> +++ octeon/dev/octcit.c 9 Aug 2022 14:48:48 -0000
> @@ -489,6 +489,10 @@ octcit_splx(int newipl)
> (void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
> }
>
> + /* Trigger deferred clock interrupt if it is now unmasked. */
> + if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
> + md_triggerclock();
> +
> /* If we still have softints pending trigger processing. */
> if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
> setsoftintr0();
> Index: octeon/dev/octciu.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/octeon/dev/octciu.c,v
> retrieving revision 1.17
> diff -u -p -r1.17 octciu.c
> --- octeon/dev/octciu.c 1 Sep 2019 12:16:01 -0000 1.17
> +++ octeon/dev/octciu.c 9 Aug 2022 14:48:48 -0000
> @@ -588,6 +588,10 @@ octciu_splx(int newipl)
> scpu->scpu_ibank[2].en,
> scpu->scpu_intem[2] & ~scpu->scpu_imask[newipl][2]);
>
> + /* Trigger deferred clock interrupt if it is now unmasked. */
> + if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
> + md_triggerclock();
> +
> /* If we still have softints pending trigger processing. */
> if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
> setsoftintr0();
> Index: loongson/dev/bonito.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/loongson/dev/bonito.c,v
> retrieving revision 1.35
> diff -u -p -r1.35 bonito.c
> --- loongson/dev/bonito.c 11 Mar 2021 11:16:57 -0000 1.35
> +++ loongson/dev/bonito.c 9 Aug 2022 14:48:48 -0000
> @@ -485,6 +485,11 @@ bonito_splx(int newipl)
> /* Update masks to new ipl. Order highly important! */
> ci->ci_ipl = newipl;
> bonito_setintrmask(newipl);
> +
> + /* Trigger deferred clock interrupt if it is now unmasked. */
> + if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
> + md_triggerclock();
> +
> /* If we still have softints pending trigger processing. */
> if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
> setsoftintr0();
> Index: loongson/loongson/loongson3_intr.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/loongson/loongson/loongson3_intr.c,v
> retrieving revision 1.7
> diff -u -p -r1.7 loongson3_intr.c
> --- loongson/loongson/loongson3_intr.c 24 Feb 2018 11:42:31 -0000 1.7
> +++ loongson/loongson/loongson3_intr.c 9 Aug 2022 14:48:48 -0000
> @@ -355,6 +355,10 @@ loongson3_splx(int newipl)
> REGVAL(LS3_IRT_INTENSET(0)) =
> loongson3_intem & ~loongson3_imask[newipl];
>
> + /* Trigger deferred clock interrupt if it is now unmasked. */
> + if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
> + md_triggerclock();
> +
> if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
> setsoftintr0();
> }
>
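
One note on the spin loop in cp0_trigger_int5(): the termination test
(int32_t)(compare - cp0_get_count()) > 0 stays correct across counter
wraparound because the subtraction happens modulo 2^32 and the
difference is then read as signed.  A standalone illustration with
hypothetical values (not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "compare leads count" test, as in cp0_trigger_int5(). */
static int
compare_leads_count(uint32_t compare, uint32_t count)
{
	return (int32_t)(compare - count) > 0;
}

int
main(void)
{
	/* Compare set 16 ticks ahead of count: leads, the loop may stop. */
	printf("%d\n", compare_leads_count(116, 100));		/* 1 */

	/* Count already passed compare: must retry with a doubled offset. */
	printf("%d\n", compare_leads_count(100, 116));		/* 0 */

	/* Compare wrapped past zero but still leads count. */
	printf("%d\n", compare_leads_count(8, 0xfffffff8));	/* 1 */

	return 0;
}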