Module Name:    src
Committed By:   thorpej
Date:           Sat Sep 19 01:32:16 UTC 2020

Modified Files:
        src/sys/arch/alpha/alpha: locore.s

Log Message:
- The Alpha fast-soft-intrs implementation supports 2 soft interrupt levels,
  so in exception_return() ensure we clear ineligible-at-new-IPL softint
  bits from the SSIR, otherwise we could loop forever in the following
  scenario:

        processing softnet -> clock interrupt -> schedule softclock

- If the softint thread blocks, it's possible to bounce back through
  the softint return trampoline at something other than IPL_HIGH.  This
  is not a problem other than it's contrary to what alpha_softint_dispatch()
  expects, so make alpha_softint_return() go to IPL_HIGH before it does
  anything else.

These two fixes make fast-soft-interrupts work on Alpha.


To generate a diff of this commit:
cvs rdiff -u -r1.135 -r1.136 src/sys/arch/alpha/alpha/locore.s

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/alpha/alpha/locore.s
diff -u src/sys/arch/alpha/alpha/locore.s:1.135 src/sys/arch/alpha/alpha/locore.s:1.136
--- src/sys/arch/alpha/alpha/locore.s:1.135	Fri Sep 18 00:11:31 2020
+++ src/sys/arch/alpha/alpha/locore.s	Sat Sep 19 01:32:16 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.s,v 1.135 2020/09/18 00:11:31 thorpej Exp $ */
+/* $NetBSD: locore.s,v 1.136 2020/09/19 01:32:16 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <machine/asm.h>
 
-__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.135 2020/09/18 00:11:31 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.136 2020/09/19 01:32:16 thorpej Exp $");
 
 #include "assym.h"
 
@@ -265,9 +265,21 @@ LEAF(exception_return, 1)			/* XXX shoul
 	GET_CURLWP
 	mov	v0, s0				/* s0 = curlwp */
 
-	/* see if a soft interrupt is pending. */
-2:	ldq	t1, L_CPU(s0)			/* t1 = curlwp->l_cpu */
-	ldq	t1, CPU_INFO_SSIR(t1)		/* soft int pending? */
+2:	/*
+	 * Check to see if a soft interrupt is pending.  We need to only
+	 * check for soft ints eligible to run at the new IPL.  We generate
+	 * the mask of eligible soft ints to run by masking the ssir with:
+	 *
+	 *	(ALPHA_ALL_SOFTINTS << ((ipl) << 1))
+	 *
+	 * See alpha_softint_dispatch().
+	 */
+	ldq	t1, L_CPU(s0)			/* t1 = curlwp->l_cpu */
+	ldiq	t2, ALPHA_ALL_SOFTINTS		/* t2 = ALPHA_ALL_SOFTINTS */
+	ldq	t1, CPU_INFO_SSIR(t1)		/* t1 = t1->ci_ssir */
+	sll	s3, 1, t3			/* t3 = ipl << 1 */
+	sll	t2, t3, t2			/* t2 <<= t3 */
+	and	t1, t2, t1			/* t1 &= t2 */
 	bne	t1, 6f				/* yes */
 	/* no */
 
@@ -743,7 +755,14 @@ NESTED_NOPROFILE(alpha_softint_switchto,
 
 LEAF_NOPROFILE(alpha_softint_return, 0)
 	/*
-	 * Step 1: Re-adjust the mutex count after mi_switch().
+	 * Step 1: Go to IPL_HIGH, which is what the alpha_softint_dispatch()
+	 * expects.  We will have arrived here at IPL_SCHED.
+	 */
+	ldiq	a0, ALPHA_PSL_IPL_HIGH
+	call_pal PAL_OSF1_swpipl
+
+	/*
+	 * Step 2: Re-adjust the mutex count after mi_switch().
 	 */
 	GET_CURLWP
 	ldq	v0, L_CPU(v0)
@@ -752,7 +771,7 @@ LEAF_NOPROFILE(alpha_softint_return, 0)
 	stl	t0, CPU_INFO_MTX_COUNT(v0)
 
 	/*
-	 * Step 2: Pop alpha_softint_switchto()'s stack frame
+	 * Step 3: Pop alpha_softint_switchto()'s stack frame
 	 * and return.
 	 */
 	ldq	ra, 0(sp)			/* restore ra */

Reply via email to