Module Name:    src
Committed By:   matt
Date:           Sat Apr 11 19:39:09 UTC 2015

Modified Files:
        src/sys/arch/arm/pic: pic.c

Log Message:
Adapt pic to deal with the BCM2836 interrupts.
Move pic_pending_pics, pic_pending_ipls, and pic_blocked_irqs into a
structure and make them per-cpu.  There is no global interrupt state anymore.


To generate a diff of this commit:
cvs rdiff -u -r1.28 -r1.29 src/sys/arch/arm/pic/pic.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/pic/pic.c
diff -u src/sys/arch/arm/pic/pic.c:1.28 src/sys/arch/arm/pic/pic.c:1.29
--- src/sys/arch/arm/pic/pic.c:1.28	Wed Apr  8 21:43:30 2015
+++ src/sys/arch/arm/pic/pic.c	Sat Apr 11 19:39:09 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: pic.c,v 1.28 2015/04/08 21:43:30 matt Exp $	*/
+/*	$NetBSD: pic.c,v 1.29 2015/04/11 19:39:09 matt Exp $	*/
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -33,7 +33,7 @@
 #include "opt_multiprocessor.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.28 2015/04/08 21:43:30 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.29 2015/04/11 19:39:09 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -61,17 +61,31 @@ __KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.28
 #include <arm/pic/picvar.h>
 
 #if defined(__HAVE_PIC_PENDING_INTRS)
+/*
+ * This implementation of pending interrupts on a MULTIPROCESSOR system makes
+ * the assumption that a PIC (pic_softc) shall only have all its interrupts
+ * come from the same CPU.  In other words, interrupts from a single PIC will
+ * not be distributed among multiple CPUs.
+ */
+struct pic_pending {
+	volatile uint32_t blocked_pics;
+	volatile uint32_t pending_pics;
+	volatile uint32_t pending_ipls;
+};
 static uint32_t
 	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
 static struct pic_softc *
-	pic_list_find_pic_by_pending_ipl(uint32_t);
+	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
 static void
-	pic_deliver_irqs(struct pic_softc *, int, void *);
+	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
 static void
-	pic_list_deliver_irqs(register_t, int, void *);
-volatile uint32_t pic_blocked_pics;
-volatile uint32_t pic_pending_pics;
-volatile uint32_t pic_pending_ipls;
+	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);
+
+#ifdef MULTIPROCESSOR
+percpu_t *pic_pending_percpu;
+#else
+struct pic_pending pic_pending;
+#endif /* MULTIPROCESSOR */
 #endif /* __HAVE_PIC_PENDING_INTRS */
 
 struct pic_softc *pic_list[PIC_MAXPICS];
@@ -162,12 +176,22 @@ pic_ipi_sender(void)
 void
 intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
 {
-	struct pic_softc * const pic = pic_ipi_sender();
 	KASSERT(ipi < NIPI);
-	if (cold && pic == NULL)
-		return;
-	KASSERT(pic != NULL);
-	(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
+	bool __diagused sent_p = false;
+	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
+		struct pic_softc * const pic = pic_list[slot];
+		if (pic == NULL || pic->pic_cpus == NULL)
+			continue;
+		if (kcp == NULL || kcpuset_intersect(kcp, pic->pic_cpus)) {
+			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
+			// If we were targeting a single CPU or this pic
+			// handles all cpus, we're done.
+			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
+				return;
+			sent_p = true;
+		}
+	}
+	KASSERT(cold || sent_p);
 }
 #endif /* MULTIPROCESSOR */
 
@@ -208,8 +232,16 @@ pic_mark_pending_source(struct pic_softc
 	    __BIT(is->is_irq & 0x1f));
 
 	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
-	atomic_or_32(&pic_pending_ipls, ipl_mask);
-	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));
+#ifdef MULTIPROCESSOR
+	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
+#else
+	struct pic_pending *pend = &pic_pending;
+#endif
+	atomic_or_32(&pend->pending_ipls, ipl_mask);
+	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
+#ifdef MULTIPROCESSOR
+	percpu_putref(pic_pending_percpu);
+#endif
 }
 
 void
@@ -252,9 +284,16 @@ pic_mark_pending_sources(struct pic_soft
 	}
 
 	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
-	atomic_or_32(&pic_pending_ipls, ipl_mask);
-	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));
-
+#ifdef MULTIPROCESSOR
+	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
+#else
+	struct pic_pending *pend = &pic_pending;
+#endif
+	atomic_or_32(&pend->pending_ipls, ipl_mask);
+	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
+#ifdef MULTIPROCESSOR
+	percpu_putref(pic_pending_percpu);
+#endif
 	return ipl_mask;
 }
 
@@ -324,7 +363,8 @@ pic_dispatch(struct intrsource *is, void
 
 #if defined(__HAVE_PIC_PENDING_INTRS)
 void
-pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame)
+pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
+    void *frame)
 {
 	const uint32_t ipl_mask = __BIT(ipl);
 	struct intrsource *is;
@@ -339,7 +379,7 @@ pic_deliver_irqs(struct pic_softc *pic, 
 	uint32_t blocked_irqs;
 	int irq;
 	bool progress __diagused = false;
-	
+
 	KASSERT(pic->pic_pending_ipls & ipl_mask);
 
 	irq_base = 0;
@@ -403,7 +443,7 @@ pic_deliver_irqs(struct pic_softc *pic, 
 		} while (pending_irqs);
 		if (blocked_irqs) {
 			atomic_or_32(iblocked, blocked_irqs);
-			atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id));
+			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
 		}
 	}
 
@@ -413,15 +453,16 @@ pic_deliver_irqs(struct pic_softc *pic, 
 	 * about these.
 	 */
 	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
-		atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id));
+		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
 }
 
 static void
-pic_list_unblock_irqs(void)
+pic_list_unblock_irqs(struct pic_pending *pend)
 {
-	uint32_t blocked_pics = pic_blocked_pics;
+	uint32_t blocked_pics = pend->blocked_pics;
+
+	pend->blocked_pics = 0;
 
-	pic_blocked_pics = 0;
 	for (;;) {
 		struct pic_softc *pic;
 #if PIC_MAXSOURCES > 32
@@ -458,9 +499,9 @@ pic_list_unblock_irqs(void)
 
 
 struct pic_softc *
-pic_list_find_pic_by_pending_ipl(uint32_t ipl_mask)
+pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
 {
-	uint32_t pending_pics = pic_pending_pics;
+	uint32_t pending_pics = pend->pending_pics;
 	struct pic_softc *pic;
 
 	for (;;) {
@@ -477,16 +518,17 @@ pic_list_find_pic_by_pending_ipl(uint32_
 }
 
 void
-pic_list_deliver_irqs(register_t psw, int ipl, void *frame)
+pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
+    void *frame)
 {
 	const uint32_t ipl_mask = __BIT(ipl);
 	struct pic_softc *pic;
 
-	while ((pic = pic_list_find_pic_by_pending_ipl(ipl_mask)) != NULL) {
-		pic_deliver_irqs(pic, ipl, frame);
+	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
+		pic_deliver_irqs(pend, pic, ipl, frame);
 		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
 	}
-	atomic_and_32(&pic_pending_ipls, ~ipl_mask);
+	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
 }
 #endif /* __HAVE_PIC_PENDING_INTRS */
 
@@ -499,19 +541,27 @@ pic_do_pending_ints(register_t psw, int 
 		return;
 	}
 #if defined(__HAVE_PIC_PENDING_INTRS)
-	while ((pic_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
-		KASSERT(pic_pending_ipls < __BIT(NIPL));
+#ifdef MULTIPROCESSOR
+	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
+#else
+	struct pic_pending *pend = &pic_pending;
+#endif
+	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
+		KASSERT(pend->pending_ipls < __BIT(NIPL));
 		for (;;) {
-			int ipl = 31 - __builtin_clz(pic_pending_ipls);
+			int ipl = 31 - __builtin_clz(pend->pending_ipls);
 			KASSERT(ipl < NIPL);
 			if (ipl <= newipl)
 				break;
 
 			pic_set_priority(ci, ipl);
-			pic_list_deliver_irqs(psw, ipl, frame);
-			pic_list_unblock_irqs();
+			pic_list_deliver_irqs(pend, psw, ipl, frame);
+			pic_list_unblock_irqs(pend);
 		}
 	}
+#ifdef MULTIPROCESSOR
+	percpu_putref(pic_pending_percpu);
+#endif
 #endif /* __HAVE_PIC_PENDING_INTRS */
 #ifdef __HAVE_PREEEMPTION
 	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
@@ -554,6 +604,15 @@ pic_percpu_allocate(void *v0, void *v1, 
 #endif
 }
 
+#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
+static void
+pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
+{
+	struct pic_pending * const p = v0;
+	memset(p, 0, sizeof(*p));
+}
+#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */
+
 void
 pic_add(struct pic_softc *pic, int irqbase)
 {
@@ -561,6 +620,18 @@ pic_add(struct pic_softc *pic, int irqba
 
 	KASSERT(strlen(pic->pic_name) > 0);
 
+#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
+	if (__predict_false(pic_pending_percpu == NULL)) {
+		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));
+		KASSERT(pic_pending_percpu != NULL);
+
+		/*
+		 * Now zero the per-cpu pending data.
+		 */
+		percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
+	}
+#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */
+
 	for (slot = 0; slot < PIC_MAXPICS; slot++) {
 		struct pic_softc * const xpic = pic_list[slot];
 		if (xpic == NULL) {
@@ -614,7 +685,7 @@ pic_add(struct pic_softc *pic, int irqba
 	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
 #endif
 #ifdef MULTIPROCESSOR
-	KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL));
+	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
 #endif
 	pic_list[slot] = pic;
 }

Reply via email to