On Thu, May 27, 2021 at 07:40:26PM -0500, Scott Cheloha wrote:
> On Sun, May 23, 2021 at 09:05:24AM +0000, Visa Hankala wrote:
> > When a CPU starts processing a soft interrupt, it reserves the handler
> > to prevent concurrent execution. If the soft interrupt gets rescheduled
> > during processing, the handler is run again by the same CPU. This breaks
> > FIFO ordering, though.
>
> If you want to preserve FIFO you can reinsert the handler at the queue
> tail. That would be more fair.
>
> If FIFO is the current behavior I think we ought to keep it.

I have updated the patch to preserve the FIFO order.
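
In short: while a handler runs, it is marked with the CPU that runs it
(sih_runner), and a schedule call arriving during that window only sets
the SIS_RESTART flag. When the handler returns, the dispatch loop moves
the handler to the tail of the queue, so it takes its turn behind
anything that was scheduled in the meantime. Condensed from the
dispatch loop in the diff below:

	/* After the handler has returned, under softintr_lock: */
	sih->sih_runner = NULL;
	if (sih->sih_state & SIS_RESTART) {
		/* Rescheduled while running: requeue at the tail. */
		TAILQ_INSERT_TAIL(queue, sih, sih_q);
		sih->sih_state |= SIS_PENDING;
		sih->sih_state &= ~SIS_RESTART;
	}
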
> > +STAILQ_HEAD(x86_soft_intr_queue, x86_soft_intrhand);
> > +
> > +struct x86_soft_intr_queue softintr_queue[X86_NSOFTINTR];
>
> Why did we switch to STAILQ? I know we don't have very many
> softintr_disestablish() calls but isn't O(1) removal worth the extra
> pointer?

I used STAILQ because it avoids the hassle of updating the list nodes'
back pointers. Calling softintr_disestablish() while multiple items are
pending in the queue is very rare compared to the normal
softintr_schedule() / softintr_dispatch() cycle.

However, I have changed the code back to using TAILQ.
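
For reference, the trade-off in a nutshell: TAILQ entries carry a back
pointer, so TAILQ_REMOVE() unlinks in O(1), whereas removing an
arbitrary element from a singly-linked STAILQ means walking the list to
find its predecessor first. A minimal standalone sketch of the
difference (not part of the patch):

	#include <sys/queue.h>

	struct item {
		TAILQ_ENTRY(item) t_entry;	/* prev + next pointers */
		STAILQ_ENTRY(item) s_entry;	/* next pointer only */
	};

	TAILQ_HEAD(, item) tq = TAILQ_HEAD_INITIALIZER(tq);
	STAILQ_HEAD(, item) sq = STAILQ_HEAD_INITIALIZER(sq);

	void
	remove_item(struct item *it)
	{
		struct item *prev;

		/* Doubly linked: unlink directly. */
		TAILQ_REMOVE(&tq, it, t_entry);

		/* Singly linked: find the predecessor first. */
		if (STAILQ_FIRST(&sq) == it) {
			STAILQ_REMOVE_HEAD(&sq, s_entry);
		} else {
			prev = STAILQ_FIRST(&sq);
			while (STAILQ_NEXT(prev, s_entry) != it)
				prev = STAILQ_NEXT(prev, s_entry);
			STAILQ_REMOVE_AFTER(&sq, prev, s_entry);
		}
	}
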
Index: arch/amd64/amd64/softintr.c
===================================================================
RCS file: src/sys/arch/amd64/amd64/softintr.c,v
retrieving revision 1.10
diff -u -p -r1.10 softintr.c
--- arch/amd64/amd64/softintr.c 11 Sep 2020 09:27:09 -0000 1.10
+++ arch/amd64/amd64/softintr.c 21 Jun 2021 13:32:33 -0000
@@ -37,12 +37,33 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
#include <machine/intr.h>
#include <uvm/uvm_extern.h>
-struct x86_soft_intr x86_soft_intrs[X86_NSOFTINTR];
+struct x86_soft_intrhand {
+ TAILQ_ENTRY(x86_soft_intrhand) sih_q;
+ void (*sih_fn)(void *);
+ void *sih_arg;
+ struct cpu_info *sih_runner;
+ int sih_which;
+ unsigned short sih_flags;
+ unsigned short sih_state;
+};
+
+#define SIF_MPSAFE 0x0001
+
+#define SIS_DYING 0x0001
+#define SIS_PENDING 0x0002
+#define SIS_RESTART 0x0004
+
+TAILQ_HEAD(x86_soft_intr_queue, x86_soft_intrhand);
+
+struct x86_soft_intr_queue softintr_queue[X86_NSOFTINTR];
+struct mutex softintr_lock = MUTEX_INITIALIZER(IPL_HIGH);
const int x86_soft_intr_to_ssir[X86_NSOFTINTR] = {
SIR_CLOCK,
@@ -58,15 +79,10 @@ const int x86_soft_intr_to_ssir[X86_NSOF
void
softintr_init(void)
{
- struct x86_soft_intr *si;
int i;
- for (i = 0; i < X86_NSOFTINTR; i++) {
- si = &x86_soft_intrs[i];
- TAILQ_INIT(&si->softintr_q);
- mtx_init(&si->softintr_lock, IPL_HIGH);
- si->softintr_ssir = x86_soft_intr_to_ssir[i];
- }
+ for (i = 0; i < nitems(softintr_queue); i++)
+ TAILQ_INIT(&softintr_queue[i]);
}
/*
@@ -78,31 +94,44 @@ void
softintr_dispatch(int which)
{
struct cpu_info *ci = curcpu();
- struct x86_soft_intr *si = &x86_soft_intrs[which];
+ struct x86_soft_intr_queue *queue = &softintr_queue[which];
struct x86_soft_intrhand *sih;
int floor;
floor = ci->ci_handled_intr_level;
ci->ci_handled_intr_level = ci->ci_ilevel;
- KERNEL_LOCK();
- for (;;) {
- mtx_enter(&si->softintr_lock);
- sih = TAILQ_FIRST(&si->softintr_q);
- if (sih == NULL) {
- mtx_leave(&si->softintr_lock);
- break;
+ mtx_enter(&softintr_lock);
+ while ((sih = TAILQ_FIRST(queue)) != NULL) {
+ KASSERT((sih->sih_state & (SIS_PENDING | SIS_RESTART)) ==
+ SIS_PENDING);
+ KASSERT(sih->sih_runner == NULL);
+
+ sih->sih_state &= ~SIS_PENDING;
+ TAILQ_REMOVE(queue, sih, sih_q);
+ sih->sih_runner = ci;
+ mtx_leave(&softintr_lock);
+
+ if (sih->sih_flags & SIF_MPSAFE) {
+ (*sih->sih_fn)(sih->sih_arg);
+ } else {
+ KERNEL_LOCK();
+ (*sih->sih_fn)(sih->sih_arg);
+ KERNEL_UNLOCK();
}
- TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
- sih->sih_pending = 0;
- uvmexp.softs++;
-
- mtx_leave(&si->softintr_lock);
+ mtx_enter(&softintr_lock);
+ KASSERT((sih->sih_state & SIS_PENDING) == 0);
+ sih->sih_runner = NULL;
+ if (sih->sih_state & SIS_RESTART) {
+ TAILQ_INSERT_TAIL(queue, sih, sih_q);
+ sih->sih_state |= SIS_PENDING;
+ sih->sih_state &= ~SIS_RESTART;
+ }
- (*sih->sih_fn)(sih->sih_arg);
+ uvmexp.softs++;
}
- KERNEL_UNLOCK();
+ mtx_leave(&softintr_lock);
ci->ci_handled_intr_level = floor;
}
@@ -115,9 +144,13 @@ softintr_dispatch(int which)
void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
- struct x86_soft_intr *si;
struct x86_soft_intrhand *sih;
int which;
+ unsigned short flags = 0;
+
+ if (ipl & IPL_MPSAFE)
+ flags |= SIF_MPSAFE;
+ ipl &= ~IPL_MPSAFE;
switch (ipl) {
case IPL_SOFTCLOCK:
@@ -137,14 +170,14 @@ softintr_establish(int ipl, void (*func)
panic("softintr_establish");
}
- si = &x86_soft_intrs[which];
-
sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
if (__predict_true(sih != NULL)) {
- sih->sih_intrhead = si;
sih->sih_fn = func;
sih->sih_arg = arg;
- sih->sih_pending = 0;
+ sih->sih_runner = NULL;
+ sih->sih_which = which;
+ sih->sih_flags = flags;
+ sih->sih_state = 0;
}
return (sih);
}
@@ -157,15 +190,52 @@ softintr_establish(int ipl, void (*func)
void
softintr_disestablish(void *arg)
{
+ struct cpu_info *runner;
struct x86_soft_intrhand *sih = arg;
- struct x86_soft_intr *si = sih->sih_intrhead;
- mtx_enter(&si->softintr_lock);
- if (sih->sih_pending) {
- TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
- sih->sih_pending = 0;
+ assertwaitok();
+
+ mtx_enter(&softintr_lock);
+ sih->sih_state |= SIS_DYING;
+ sih->sih_state &= ~SIS_RESTART;
+ if (sih->sih_state & SIS_PENDING) {
+ sih->sih_state &= ~SIS_PENDING;
+ TAILQ_REMOVE(&softintr_queue[sih->sih_which], sih, sih_q);
}
- mtx_leave(&si->softintr_lock);
+ runner = sih->sih_runner;
+ mtx_leave(&softintr_lock);
+
+ if (runner != NULL)
+ sched_barrier(runner);
+
+ KASSERT((sih->sih_state & (SIS_PENDING | SIS_RESTART)) == 0);
+ KASSERT(sih->sih_runner == NULL);
free(sih, M_DEVBUF, sizeof(*sih));
}
+
+void
+softintr_schedule(void *arg)
+{
+ struct x86_soft_intrhand *sih = arg;
+ struct x86_soft_intr_queue *queue = &softintr_queue[sih->sih_which];
+ int trigger = 0;
+
+ mtx_enter(&softintr_lock);
+ KASSERT((sih->sih_state & SIS_DYING) == 0);
+ if (sih->sih_runner == NULL) {
+ KASSERT((sih->sih_state & SIS_RESTART) == 0);
+ if ((sih->sih_state & SIS_PENDING) == 0) {
+ TAILQ_INSERT_TAIL(queue, sih, sih_q);
+ sih->sih_state |= SIS_PENDING;
+ trigger = 1;
+ }
+ } else {
+ KASSERT((sih->sih_state & SIS_PENDING) == 0);
+ sih->sih_state |= SIS_RESTART;
+ }
+ mtx_leave(&softintr_lock);
+
+ if (trigger)
+ softintr(x86_soft_intr_to_ssir[sih->sih_which]);
+}
Index: arch/amd64/include/intr.h
===================================================================
RCS file: src/sys/arch/amd64/include/intr.h,v
retrieving revision 1.32
diff -u -p -r1.32 intr.h
--- arch/amd64/include/intr.h 17 Jun 2020 06:14:52 -0000 1.32
+++ arch/amd64/include/intr.h 21 Jun 2021 13:32:33 -0000
@@ -231,42 +231,13 @@ extern void (*ipifunc[X86_NIPI])(struct
#define X86_NSOFTINTR 3
#ifndef _LOCORE
-#include <sys/queue.h>
-
-struct x86_soft_intrhand {
- TAILQ_ENTRY(x86_soft_intrhand)
- sih_q;
- struct x86_soft_intr *sih_intrhead;
- void (*sih_fn)(void *);
- void *sih_arg;
- int sih_pending;
-};
-
-struct x86_soft_intr {
- TAILQ_HEAD(, x86_soft_intrhand)
- softintr_q;
- int softintr_ssir;
- struct mutex softintr_lock;
-};
void *softintr_establish(int, void (*)(void *), void *);
void softintr_disestablish(void *);
void softintr_init(void);
void softintr_dispatch(int);
+void softintr_schedule(void *);
-#define softintr_schedule(arg) \
-do { \
- struct x86_soft_intrhand *__sih = (arg); \
- struct x86_soft_intr *__si = __sih->sih_intrhead; \
- \
- mtx_enter(&__si->softintr_lock); \
- if (__sih->sih_pending == 0) { \
- TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q); \
- __sih->sih_pending = 1; \
- softintr(__si->softintr_ssir); \
- } \
- mtx_leave(&__si->softintr_lock); \
-} while (/*CONSTCOND*/ 0)
#endif /* _LOCORE */
#endif /* !_MACHINE_INTR_H_ */
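
For completeness, a hypothetical usage sketch showing how a driver
would use the new interface (not part of the patch; the example_*
names are made up):

	void *example_sih;

	void
	example_softintr(void *arg)
	{
		/* With IPL_MPSAFE, this runs without the kernel lock. */
	}

	void
	example_attach(void)
	{
		example_sih = softintr_establish(IPL_SOFTNET | IPL_MPSAFE,
		    example_softintr, NULL);
	}

	int
	example_hardintr(void *arg)
	{
		/* Defer work; triggers the softintr unless already pending. */
		softintr_schedule(example_sih);
		return (1);
	}

	void
	example_detach(void)
	{
		/*
		 * Process context only (assertwaitok); if the handler is
		 * running on another CPU, sched_barrier() waits for it to
		 * finish before the handle is freed.
		 */
		softintr_disestablish(example_sih);
	}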