Module Name:    src
Committed By:   thorpej
Date:           Wed Sep 16 04:07:32 UTC 2020

Modified Files:
        src/sys/arch/alpha/alpha: genassym.cf interrupt.c locore.s
        src/sys/arch/alpha/include: cpu.h intr.h types.h

Log Message:
Implement fast soft interrupts for Alpha.  It's not yet enabled, because
there is a bug lurking that causes problems when user space starts up,
so we'll stick with the slow path for now.


To generate a diff of this commit:
cvs rdiff -u -r1.25 -r1.26 src/sys/arch/alpha/alpha/genassym.cf
cvs rdiff -u -r1.84 -r1.85 src/sys/arch/alpha/alpha/interrupt.c
cvs rdiff -u -r1.132 -r1.133 src/sys/arch/alpha/alpha/locore.s
cvs rdiff -u -r1.95 -r1.96 src/sys/arch/alpha/include/cpu.h
cvs rdiff -u -r1.75 -r1.76 src/sys/arch/alpha/include/intr.h
cvs rdiff -u -r1.57 -r1.58 src/sys/arch/alpha/include/types.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/alpha/alpha/genassym.cf
diff -u src/sys/arch/alpha/alpha/genassym.cf:1.25 src/sys/arch/alpha/alpha/genassym.cf:1.26
--- src/sys/arch/alpha/alpha/genassym.cf:1.25	Sat Sep  5 18:01:42 2020
+++ src/sys/arch/alpha/alpha/genassym.cf	Wed Sep 16 04:07:32 2020
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.25 2020/09/05 18:01:42 thorpej Exp $
+# $NetBSD: genassym.cf,v 1.26 2020/09/16 04:07:32 thorpej Exp $
 
 #
 # Copyright (c) 1982, 1990, 1993
@@ -130,6 +130,7 @@ define	ALPHA_PSL_USERMODE	ALPHA_PSL_USER
 define	ALPHA_PSL_IPL_MASK	ALPHA_PSL_IPL_MASK
 define	ALPHA_PSL_IPL_0		ALPHA_PSL_IPL_0
 define	ALPHA_PSL_IPL_SOFT_LO	ALPHA_PSL_IPL_SOFT_LO
+define	ALPHA_PSL_IPL_SOFT_HI	ALPHA_PSL_IPL_SOFT_HI
 define	ALPHA_PSL_IPL_HIGH	ALPHA_PSL_IPL_HIGH
 
 # pte bits
@@ -189,4 +190,5 @@ define	SYS_exit		SYS_exit
 define	CPU_INFO_CURLWP		offsetof(struct cpu_info, ci_curlwp)
 define	CPU_INFO_IDLE_LWP	offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define	CPU_INFO_SSIR		offsetof(struct cpu_info, ci_ssir)
+define	CPU_INFO_MTX_COUNT	offsetof(struct cpu_info, ci_mtx_count)
 define	CPU_INFO_SIZEOF		sizeof(struct cpu_info)

Index: src/sys/arch/alpha/alpha/interrupt.c
diff -u src/sys/arch/alpha/alpha/interrupt.c:1.84 src/sys/arch/alpha/alpha/interrupt.c:1.85
--- src/sys/arch/alpha/alpha/interrupt.c:1.84	Sat Sep  5 18:01:42 2020
+++ src/sys/arch/alpha/alpha/interrupt.c	Wed Sep 16 04:07:32 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: interrupt.c,v 1.84 2020/09/05 18:01:42 thorpej Exp $ */
+/* $NetBSD: interrupt.c,v 1.85 2020/09/16 04:07:32 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.84 2020/09/05 18:01:42 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.85 2020/09/16 04:07:32 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -450,47 +450,142 @@ badaddr_read(void *addr, size_t size, vo
 	return (rv);
 }
 
+#ifdef __HAVE_FAST_SOFTINTS
+
+#define	SOFTINT_CLOCK_MASK	__BIT(SOFTINT_CLOCK)
+#define	SOFTINT_BIO_MASK	__BIT(SOFTINT_BIO)
+#define	SOFTINT_NET_MASK	__BIT(SOFTINT_NET)
+#define	SOFTINT_SERIAL_MASK	__BIT(SOFTINT_SERIAL)
+
+#define	ALPHA_IPL1_SOFTINTS	(SOFTINT_CLOCK_MASK | SOFTINT_BIO_MASK)
+#define	ALPHA_IPL2_SOFTINTS	(SOFTINT_NET_MASK | SOFTINT_SERIAL_MASK)
+
+#define	ALPHA_ALL_SOFTINTS	(ALPHA_IPL1_SOFTINTS | ALPHA_IPL2_SOFTINTS)
+
+#define	SOFTINT_TO_IPL(si)						\
+	(ALPHA_PSL_IPL_SOFT_LO + ((ALPHA_IPL2_SOFTINTS >> (si)) & 1))
+
+#define	SOFTINTS_ELIGIBLE(ipl)						\
+	((ALPHA_ALL_SOFTINTS << ((ipl) << 1)) & ALPHA_ALL_SOFTINTS)
+
+/* Validate some assumptions the code makes. */
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_CLOCK) == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_BIO) == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_NET) == ALPHA_PSL_IPL_SOFT_HI);
+__CTASSERT(SOFTINT_TO_IPL(SOFTINT_SERIAL) == ALPHA_PSL_IPL_SOFT_HI);
+
+__CTASSERT(IPL_SOFTCLOCK == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(IPL_SOFTBIO == ALPHA_PSL_IPL_SOFT_LO);
+__CTASSERT(IPL_SOFTNET == ALPHA_PSL_IPL_SOFT_HI);
+__CTASSERT(IPL_SOFTSERIAL == ALPHA_PSL_IPL_SOFT_HI);
+
+__CTASSERT(SOFTINT_CLOCK_MASK & 0x3);
+__CTASSERT(SOFTINT_BIO_MASK & 0x3);
+__CTASSERT(SOFTINT_NET_MASK & 0xc);
+__CTASSERT(SOFTINT_SERIAL_MASK & 0xc);
+__CTASSERT(SOFTINT_COUNT == 4);
+
+__CTASSERT((ALPHA_ALL_SOFTINTS & ~0xfUL) == 0);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_NONE) == ALPHA_ALL_SOFTINTS);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTCLOCK) == ALPHA_IPL2_SOFTINTS);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTBIO) == ALPHA_IPL2_SOFTINTS);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTNET) == 0);
+__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTSERIAL) == 0);
+
 /*
- * spllower:
+ * softint_trigger:
  *
- *	Lower interrupt priority.  May need to check for software
- *	interrupts.
+ *	Trigger a soft interrupt.
  */
 void
-spllower(int ipl)
+softint_trigger(uintptr_t const machdep)
 {
+	/* No need for an atomic; called at splhigh(). */
+	KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_HIGH);
+	curcpu()->ci_ssir |= machdep;
+}
 
-	if (ipl == ALPHA_PSL_IPL_0 && curcpu()->ci_ssir) {
-		(void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT_LO);
-		softintr_dispatch();
-	}
-	(void) alpha_pal_swpipl(ipl);
+/*
+ * softint_init_md:
+ *
+ *	Machine-dependent initialization for a fast soft interrupt thread.
+ */
+void
+softint_init_md(lwp_t * const l, u_int const level, uintptr_t * const machdep)
+{
+	lwp_t ** lp = &l->l_cpu->ci_silwps[level];
+	KASSERT(*lp == NULL || *lp == l);
+	*lp = l;
+
+	const uintptr_t si_bit = __BIT(level);
+	KASSERT(si_bit & ALPHA_ALL_SOFTINTS);
+	*machdep = si_bit;
 }
 
 /*
- * softintr_dispatch:
+ * Helper macro.
+ *
+ * Dispatch a softint and then restart the loop so that higher
+ * priority softints are always done first.
+ */
+#define	DOSOFTINT(level)						\
+	if (ssir & SOFTINT_##level##_MASK) {				\
+		ci->ci_ssir &= ~SOFTINT_##level##_MASK;			\
+		alpha_softint_switchto(l, IPL_SOFT##level,		\
+		    ci->ci_silwps[SOFTINT_##level]);			\
+		KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) ==	\
+		    ALPHA_PSL_IPL_HIGH);				\
+		continue;						\
+	}								\
+
+/*
+ * alpha_softint_dispatch:
  *
- *	Process pending software interrupts.
+ *	Process pending soft interrupts that are eligible to run
+ *	at the specified new IPL.  Must be called at splhigh().
  */
 void
-softintr_dispatch(void)
+alpha_softint_dispatch(int const ipl)
 {
+	struct lwp * const l = curlwp;
+	struct cpu_info * const ci = l->l_cpu;
+	unsigned long ssir;
+	const unsigned long eligible = SOFTINTS_ELIGIBLE(ipl);
+
+	KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_HIGH);
+
+	for (;;) {
+		ssir = ci->ci_ssir & eligible;
+		if (ssir == 0)
+			break;
 
-	/* XXX Nothing until alpha gets __HAVE_FAST_SOFTINTS */
+		DOSOFTINT(SERIAL);
+		DOSOFTINT(NET);
+		DOSOFTINT(BIO);
+		DOSOFTINT(CLOCK);
+	}
 }
 
-#ifdef __HAVE_FAST_SOFTINTS
+#endif /* __HAVE_FAST_SOFTINTS */
+
 /*
- * softint_trigger:
+ * spllower:
  *
- *	Trigger a soft interrupt.
+ *	Lower interrupt priority.  May need to check for software
+ *	interrupts.
  */
 void
-softint_trigger(uintptr_t machdep)
+spllower(int const ipl)
 {
-	atomic_or_ulong(&curcpu()->ci_ssir, 1 << (x))
+
+#ifdef __HAVE_FAST_SOFTINTS
+	if (ipl < ALPHA_PSL_IPL_SOFT_HI && curcpu()->ci_ssir) {
+		(void) alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
+		alpha_softint_dispatch(ipl);
+	}
+#endif /* __HAVE_FAST_SOFTINTS */
+	(void) alpha_pal_swpipl(ipl);
 }
-#endif
 
 /*
  * cpu_intr_p:

Index: src/sys/arch/alpha/alpha/locore.s
diff -u src/sys/arch/alpha/alpha/locore.s:1.132 src/sys/arch/alpha/alpha/locore.s:1.133
--- src/sys/arch/alpha/alpha/locore.s:1.132	Sat Sep  5 18:01:42 2020
+++ src/sys/arch/alpha/alpha/locore.s	Wed Sep 16 04:07:32 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.s,v 1.132 2020/09/05 18:01:42 thorpej Exp $ */
+/* $NetBSD: locore.s,v 1.133 2020/09/16 04:07:32 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <machine/asm.h>
 
-__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.132 2020/09/05 18:01:42 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.133 2020/09/16 04:07:32 thorpej Exp $");
 
 #include "assym.h"
 
@@ -243,19 +243,28 @@ LEAF(exception_return, 1)			/* XXX shoul
 	br	pv, 1f
 1:	LDGP(pv)
 
-	ldq	s1, (FRAME_PS * 8)(sp)		/* get the saved PS */
-	and	s1, ALPHA_PSL_IPL_MASK, t0	/* look at the saved IPL */
-	bne	t0, 5f				/* != 0: can't do AST or SIR */
+	ldq	s1, (FRAME_PS * 8)(sp)		/* s1 = new PSL */
+	and	s1, ALPHA_PSL_IPL_MASK, s3	/* s3 = new ipl */
+
+	/* --- BEGIN inline spllower() --- */
+
+	cmpult	s3, ALPHA_PSL_IPL_SOFT_HI, t1	/* new IPL < SOFT_HI? */
+	beq	t1, 5f				/* no, can't do AST or SI */
+	/* yes */
 
 	/* GET_CURLWP clobbers v0, t0, t8...t11. */
 	GET_CURLWP
 	mov	v0, s0				/* s0 = curlwp */
 
+#ifdef __HAVE_FAST_SOFTINTS
 	/* see if a soft interrupt is pending. */
 2:	ldq	t1, L_CPU(s0)			/* t1 = curlwp->l_cpu */
 	ldq	t1, CPU_INFO_SSIR(t1)		/* soft int pending? */
 	bne	t1, 6f				/* yes */
 	/* no */
+#endif /* __HAVE_FAST_SOFTINTS */
+
+	/* --- END inline spllower() --- */
 
 	and	s1, ALPHA_PSL_USERMODE, t0	/* are we returning to user? */
 	beq	t0, 5f				/* no: just return */
@@ -282,16 +291,19 @@ LEAF(exception_return, 1)			/* XXX shoul
 	.set at
 	/* NOTREACHED */
 
-	/* We've got a SIR */
-6:	ldiq	a0, ALPHA_PSL_IPL_SOFT_LO
+#ifdef __HAVE_FAST_SOFTINTS
+	/* We've got a softint */
+6:	ldiq	a0, ALPHA_PSL_IPL_HIGH
 	call_pal PAL_OSF1_swpipl
 	mov	v0, s2				/* remember old IPL */
-	CALL(softintr_dispatch)
+	mov	s3, a0				/* pass new ipl */
+	CALL(alpha_softint_dispatch)
 
-	/* SIR handled; restore IPL and check again */
+	/* SI handled; restore IPL and check again */
 	mov	s2, a0
 	call_pal PAL_OSF1_swpipl
 	br	2b
+#endif /* __HAVE_FAST_SOFTINTS */
 
 	/* We've got an AST */
 7:	stl	zero, L_MD_ASTPENDING(s0)	/* no AST pending */
@@ -643,13 +655,117 @@ LEAF(savectx, 1)
 
 /**************************************************************************/
 
+#ifdef __HAVE_FAST_SOFTINTS
+/*
+ * void alpha_softint_switchto(struct lwp *current, int ipl, struct lwp *next)
+ * Switch away from the current LWP to the specified softint LWP, and
+ * dispatch to softint processing.
+ *	Arguments:
+ *	a0	'struct lwp *' of the LWP to switch from
+ *	a1	IPL that the softint will run at
+ *	a2	'struct lwp *' of the LWP to switch to
+ *
+ * N.B. We have arranged that a0 and a1 are already set up correctly
+ * for the call to softint_dispatch().
+ */
+NESTED_NOPROFILE(alpha_softint_switchto, 3, 16, ra, IM_RA, 0)
+	LDGP(pv)
+
+	ldq	a3, L_PCB(a0)			/* a3 = from->l_pcb */
+
+	lda	sp, -16(sp)			/* set up stack frame */
+	stq	ra, (16-8)(sp)			/* save ra */
+
+	/*
+	 * Step 1: Save the current LWP's context.  We don't
+	 * save the return address directly; instead, we arrange
+	 * for it to bounce through a trampoline that fixes up
+	 * the state in case the softint LWP blocks.
+	 */
+	stq	sp, PCB_HWPCB_KSP(a3)		/* store sp */
+	stq	s0, PCB_CONTEXT+(0 * 8)(a3)	/* store s0 - s6 */
+	stq	s1, PCB_CONTEXT+(1 * 8)(a3)
+	stq	s2, PCB_CONTEXT+(2 * 8)(a3)
+	stq	s3, PCB_CONTEXT+(3 * 8)(a3)
+	stq	s4, PCB_CONTEXT+(4 * 8)(a3)
+	stq	s5, PCB_CONTEXT+(5 * 8)(a3)
+	stq	s6, PCB_CONTEXT+(6 * 8)(a3)
+
+	/* Set the trampoline address in saved context. */
+	lda	v0, alpha_softint_return
+	stq	v0, PCB_CONTEXT+(7 * 8)(a3)	/* store ra */
+
+	/*
+	 * Step 2: Switch to the softint LWP's stack.
+	 * We always start at the top of the stack (i.e.
+	 * just below the trapframe).
+	 *
+	 * N.B. There is no need to restore any other registers
+	 * from the softint LWP's context; we are starting from
+	 * the root of the call graph.
+	 */
+	ldq	sp, L_MD_TF(a2)
+
+	/*
+	 * Step 3: Update curlwp.
+	 *
+	 * N.B. We save off the from-LWP argument that will be passed
+	 * to softint_dispatch() in s0, which we'll need to restore
+	 * before returning.  If we bounce through the trampoline, the
+	 * context switch will restore it for us.
+	 */
+	mov	a0, s0			/* s0 = from LWP */
+	SET_CURLWP(a2)			/* clobbers a0, v0, t0, t8..t11 */
+
+	/*
+	 * Step 4: Call softint_dispatch().
+	 *
+	 * N.B. a1 already has the IPL argument.
+	 */
+	mov	s0, a0			/* a0 = from LWP */
+	CALL(softint_dispatch)
+
+	/*
+	 * Step 5: Restore everything and return.
+	 */
+	ldq	a3, L_PCB(s0)			/* a3 = from->l_pcb */
+	SET_CURLWP(s0)			/* clobbers a0, v0, t0, t8..t11 */
+	ldq	sp, PCB_HWPCB_KSP(a3)		/* restore sp */
+	ldq	s0, PCB_CONTEXT+(0 * 8)(a3)	/* restore s0 */
+	ldq	ra, (16-8)(sp)			/* restore ra */
+	lda	sp, 16(sp)			/* pop stack frame */
+	RET
+	END(alpha_softint_switchto)
+
+LEAF_NOPROFILE(alpha_softint_return, 0)
+	/*
+	 * Step 1: Re-adjust the mutex count after mi_switch().
+	 */
+	GET_CURLWP
+	ldq	v0, L_CPU(v0)
+	ldl	t0, CPU_INFO_MTX_COUNT(v0)
+	addl	t0, 1, t0
+	stl	t0, CPU_INFO_MTX_COUNT(v0)
+
+	/*
+	 * Step 2: Pop alpha_softint_switchto()'s stack frame
+	 * and return.
+	 */
+	ldq	ra, (16-8)(sp)			/* restore ra */
+	lda	sp, 16(sp)			/* pop stack frame */
+	RET
+	END(alpha_softint_return)
+#endif /* __HAVE_FAST_SOFTINTS */
 
 /*
- * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next)
+ * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next,
+ *                          bool returning)
  * Switch to the specified next LWP
  * Arguments:
  *	a0	'struct lwp *' of the LWP to switch from
  *	a1	'struct lwp *' of the LWP to switch to
+ *	a2	non-zero if we're returning to an interrupted LWP
+ *		from a soft interrupt
  */
 LEAF(cpu_switchto, 0)
 	LDGP(pv)
@@ -657,25 +773,37 @@ LEAF(cpu_switchto, 0)
 	/*
 	 * do an inline savectx(), to save old context
 	 */
-	ldq	a2, L_PCB(a0)
+	ldq	a3, L_PCB(a0)
 	/* NOTE: ksp is stored by the swpctx */
-	stq	s0, PCB_CONTEXT+(0 * 8)(a2)	/* store s0 - s6 */
-	stq	s1, PCB_CONTEXT+(1 * 8)(a2)
-	stq	s2, PCB_CONTEXT+(2 * 8)(a2)
-	stq	s3, PCB_CONTEXT+(3 * 8)(a2)
-	stq	s4, PCB_CONTEXT+(4 * 8)(a2)
-	stq	s5, PCB_CONTEXT+(5 * 8)(a2)
-	stq	s6, PCB_CONTEXT+(6 * 8)(a2)
-	stq	ra, PCB_CONTEXT+(7 * 8)(a2)	/* store ra */
+	stq	s0, PCB_CONTEXT+(0 * 8)(a3)	/* store s0 - s6 */
+	stq	s1, PCB_CONTEXT+(1 * 8)(a3)
+	stq	s2, PCB_CONTEXT+(2 * 8)(a3)
+	stq	s3, PCB_CONTEXT+(3 * 8)(a3)
+	stq	s4, PCB_CONTEXT+(4 * 8)(a3)
+	stq	s5, PCB_CONTEXT+(5 * 8)(a3)
+	stq	s6, PCB_CONTEXT+(6 * 8)(a3)
+	stq	ra, PCB_CONTEXT+(7 * 8)(a3)	/* store ra */
 
 	mov	a0, s4				/* save old curlwp */
 	mov	a1, s2				/* save new lwp */
 
+#ifdef __HAVE_FAST_SOFTINTS
+	/*
+	 * Check to see if we're doing a light-weight switch back to
+	 * an interrupted LWP (referred to as the "pinned" LWP) from
+	 * a softint LWP.  In this case we have been running on the
+	 * pinned LWP's context -- swpctx was not used to get here --
+	 * so we won't be using swpctx to go back, either.
+	 */
+	bne	a2, 3f			/* yes, go handle it */
+	/* no, normal context switch */
+#endif /* __HAVE_FAST_SOFTINTS */
+
 	/* Switch to the new PCB. */
 	ldq	a0, L_MD_PCBPADDR(s2)
-	call_pal PAL_OSF1_swpctx	/* clobbers a0, t0, t8-t11, a0 */
+	call_pal PAL_OSF1_swpctx	/* clobbers a0, t0, t8-t11, v0 */
 
-	SET_CURLWP(s2)			/* curlwp = l */
+1:	SET_CURLWP(s2)			/* curlwp = l */
 
 	/*
 	 * Now running on the new PCB.
@@ -687,15 +815,15 @@ LEAF(cpu_switchto, 0)
 	 */
 	ldq	a0, L_PROC(s2)			/* first ras_lookup() arg */
 	ldq	t0, P_RASLIST(a0)		/* any RAS entries? */
-	beq	t0, 1f				/* no, skip */
+	beq	t0, 2f				/* no, skip */
 	ldq	s1, L_MD_TF(s2)			/* s1 = l->l_md.md_tf */
 	ldq	a1, (FRAME_PC*8)(s1)		/* second ras_lookup() arg */
 	CALL(ras_lookup)			/* ras_lookup(p, PC) */
 	addq	v0, 1, t0			/* -1 means "not in ras" */
-	beq	t0, 1f
+	beq	t0, 2f
 	stq	v0, (FRAME_PC*8)(s1)
 
-1:
+2:
 	mov	s4, v0				/* return the old lwp */
 	/*
 	 * Restore registers and return.
@@ -711,6 +839,23 @@ LEAF(cpu_switchto, 0)
 	ldq	s0, PCB_CONTEXT+(0 * 8)(s0)		/* restore s0 */
 
 	RET
+
+#ifdef __HAVE_FAST_SOFTINTS
+3:	/*
+	 * Registers right now:
+	 *
+	 *	a0	old LWP
+	 *	a1	new LWP
+	 *	a3	old PCB
+	 *
+	 * What we need to do here is swap the stack, since we won't
+	 * be getting that from swpctx.
+	 */
+	ldq	a2, L_PCB(a1)			/* a2 = new PCB */
+	stq	sp, PCB_HWPCB_KSP(a3)		/* save old SP */
+	ldq	sp, PCB_HWPCB_KSP(a2)		/* restore new SP */
+	br	1b				/* finish up */
+#endif /* __HAVE_FAST_SOFTINTS */
 	END(cpu_switchto)
 
 /*

Index: src/sys/arch/alpha/include/cpu.h
diff -u src/sys/arch/alpha/include/cpu.h:1.95 src/sys/arch/alpha/include/cpu.h:1.96
--- src/sys/arch/alpha/include/cpu.h:1.95	Sat Sep  5 18:01:42 2020
+++ src/sys/arch/alpha/include/cpu.h	Wed Sep 16 04:07:32 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.h,v 1.95 2020/09/05 18:01:42 thorpej Exp $ */
+/* $NetBSD: cpu.h,v 1.96 2020/09/16 04:07:32 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -85,6 +85,7 @@
 #if defined(_KERNEL) || defined(_KMEMUSER)
 #include <sys/cpu_data.h>
 #include <sys/cctr.h>
+#include <sys/intr.h>
 #include <machine/frame.h>
 
 /*
@@ -109,6 +110,8 @@ struct cpu_info {
 
 	u_long ci_intrdepth;		/* interrupt trap depth */
 	volatile u_long ci_ssir;	/* simulated software interrupt reg */
+					/* LWPs for soft intr dispatch */
+	struct lwp *ci_silwps[SOFTINT_COUNT];
 	struct cpu_softc *ci_softc;	/* pointer to our device */
 
 	struct pmap *ci_pmap;		/* currently-activated pmap */

Index: src/sys/arch/alpha/include/intr.h
diff -u src/sys/arch/alpha/include/intr.h:1.75 src/sys/arch/alpha/include/intr.h:1.76
--- src/sys/arch/alpha/include/intr.h:1.75	Sat Sep  5 18:01:42 2020
+++ src/sys/arch/alpha/include/intr.h	Wed Sep 16 04:07:32 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: intr.h,v 1.75 2020/09/05 18:01:42 thorpej Exp $ */
+/* $NetBSD: intr.h,v 1.76 2020/09/16 04:07:32 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
 #define _ALPHA_INTR_H_
 
 #include <sys/evcnt.h>
-#include <machine/cpu.h>
+#include <machine/alpha_cpu.h>
 
 /*
  * The Alpha System Control Block.  This is 8k long, and you get
@@ -107,8 +107,13 @@ struct scbvec {
 #define	IPL_NONE	ALPHA_PSL_IPL_0
 #define	IPL_SOFTCLOCK	ALPHA_PSL_IPL_SOFT_LO
 #define	IPL_SOFTBIO	ALPHA_PSL_IPL_SOFT_LO
-#define	IPL_SOFTNET	ALPHA_PSL_IPL_SOFT_LO	/* XXX HI */
-#define	IPL_SOFTSERIAL	ALPHA_PSL_IPL_SOFT_LO	/* XXX HI */
+#ifdef __HAVE_FAST_SOFTINTS
+#define	IPL_SOFTNET	ALPHA_PSL_IPL_SOFT_HI
+#define	IPL_SOFTSERIAL	ALPHA_PSL_IPL_SOFT_HI
+#else
+#define	IPL_SOFTNET	ALPHA_PSL_IPL_SOFT_LO
+#define	IPL_SOFTSERIAL	ALPHA_PSL_IPL_SOFT_LO
+#endif /* __HAVE_FAST_SOFTINTS */
 #define	IPL_VM		ALPHA_PSL_IPL_IO_HI
 #define	IPL_SCHED	ALPHA_PSL_IPL_CLOCK
 #define	IPL_HIGH	ALPHA_PSL_IPL_HIGH
@@ -150,6 +155,12 @@ _splraise(int s)
 
 #include <sys/spl.h>
 
+#ifdef __HAVE_FAST_SOFTINTS
+/* Fast soft interrupt dispatch. */
+void	alpha_softint_dispatch(int);
+void	alpha_softint_switchto(struct lwp *, int, struct lwp *);
+#endif /* __HAVE_FAST_SOFTINTS */
+
 /*
  * Interprocessor interrupts.  In order how we want them processed.
  */
@@ -202,8 +213,6 @@ struct alpha_shared_intr {
 	((asi)[num].intr_maxstrays != 0 &&				\
 	 (asi)[num].intr_nstrays == (asi)[num].intr_maxstrays)
 
-void	softintr_dispatch(void);
-
 struct alpha_shared_intr *alpha_shared_intr_alloc(unsigned int, unsigned int);
 int	alpha_shared_intr_dispatch(struct alpha_shared_intr *,
 	    unsigned int);

Index: src/sys/arch/alpha/include/types.h
diff -u src/sys/arch/alpha/include/types.h:1.57 src/sys/arch/alpha/include/types.h:1.58
--- src/sys/arch/alpha/include/types.h:1.57	Fri Sep  4 15:50:09 2020
+++ src/sys/arch/alpha/include/types.h	Wed Sep 16 04:07:32 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: types.h,v 1.57 2020/09/04 15:50:09 thorpej Exp $ */
+/* $NetBSD: types.h,v 1.58 2020/09/16 04:07:32 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1990, 1993
@@ -78,6 +78,7 @@ typedef __register_t	register_t;
 #define	__HAVE_MM_MD_DIRECT_MAPPED_IO
 #define	__HAVE_MM_MD_DIRECT_MAPPED_PHYS
 #define	__HAVE_CPU_DATA_FIRST
+/* #define __HAVE_FAST_SOFTINTS -- not yet */
 #define	__HAVE_CPU_UAREA_ROUTINES
 #define	__HAVE_CPU_LWP_SETPRIVATE
 #define	__HAVE___LWP_GETPRIVATE_FAST

Reply via email to