Hi,

visa@ says I need to unlock softintr_dispatch() before I can
unlock softclock(), so let's do that.

Additionally, when we call softintr_disestablish() we want to wait for
the underlying handler to finish if it happens to be running.

We can start with amd64.

I think this approach will work:

- Keep a pointer to the running softintr handler, if any, in the queue
  structure.  NULL the pointer when sih_fn() returns.

- Take/release the kernel lock when we enter/leave sih_fn(), unless
  the SI_MPSAFE flag is set (see the usage sketch after this list).

- If the handle is running when you call softintr_disestablish(), spin
  until the handle isn't running anymore and retry.
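
For illustration, here is roughly how a driver would use the new flag.
The foo_* names are made up; the point is that an SI_MPSAFE handler
runs without the kernel lock, so it has to do its own locking:

struct foo_softc {
        struct mutex     sc_mtx;        /* mtx_init(..., IPL_SOFTNET) */
        void            *sc_sih;
};

void
foo_softintr(void *arg)
{
        struct foo_softc *sc = arg;

        /* No kernel lock here: protect softc state ourselves. */
        mtx_enter(&sc->sc_mtx);
        /* ... deferred work ... */
        mtx_leave(&sc->sc_mtx);
}

        /* attach: */
        sc->sc_sih = softintr_establish_flags(IPL_SOFTNET,
            foo_softintr, sc, SI_MPSAFE);

        /* hard interrupt handler: */
        softintr_schedule(sc->sc_sih);

        /* detach: returns only once foo_softintr() has finished */
        softintr_disestablish(sc->sc_sih);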

There is no softintr manpage, but I think it is understood that
softintr_disestablish() is only safe to call from process context;
otherwise you may deadlock.  Maybe we should add splassert(IPL_NONE)?
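
That is, something like this at the top (sketch only, not included in
the diff below):

void
softintr_disestablish(void *arg)
{
        struct x86_soft_intrhand *sih = arg;
        struct x86_soft_intr *si = sih->sih_intrhead;

        splassert(IPL_NONE);    /* process context only */
again:
        ...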

We could probably sleep here instead of spinning.  We'd have to change
every softintr_disestablish() implementation to do that, though.
Otherwise you'd have different behavior on different platforms.
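
For example (sketch only, not part of this diff), dispatch could
wakeup(9) the handle after clearing softintr_running, and disestablish
could msleep_nsec(9) on it instead of busy-waiting.  The "sintrds"
wmesg is made up:

        /* softintr_dispatch(), after the handler returns: */
        mtx_enter(&si->softintr_lock);
        si->softintr_running = NULL;
        wakeup(sih);
        uvmexp.softs++;

        /* softintr_disestablish(): */
        mtx_enter(&si->softintr_lock);
        while (si->softintr_running == sih)
                msleep_nsec(sih, &si->softintr_lock, PWAIT, "sintrds",
                    INFSLP);
        if (sih->sih_pending) {
                TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
                sih->sih_pending = 0;
        }
        mtx_leave(&si->softintr_lock);

msleep_nsec(9) drops softintr_lock while sleeping and retakes it
before returning, so rechecking softintr_running in the loop is safe.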

Index: amd64/softintr.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/softintr.c,v
retrieving revision 1.10
diff -u -p -r1.10 softintr.c
--- amd64/softintr.c    11 Sep 2020 09:27:09 -0000      1.10
+++ amd64/softintr.c    19 May 2021 21:53:02 -0000
@@ -85,35 +85,39 @@ softintr_dispatch(int which)
        floor = ci->ci_handled_intr_level;
        ci->ci_handled_intr_level = ci->ci_ilevel;
 
-       KERNEL_LOCK();
-       for (;;) {
-               mtx_enter(&si->softintr_lock);
+       mtx_enter(&si->softintr_lock);
+       while (!TAILQ_EMPTY(&si->softintr_q)) {
                sih = TAILQ_FIRST(&si->softintr_q);
-               if (sih == NULL) {
-                       mtx_leave(&si->softintr_lock);
-                       break;
-               }
                TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
                sih->sih_pending = 0;
 
-               uvmexp.softs++;
-
+               si->softintr_running = sih;
                mtx_leave(&si->softintr_lock);
 
+               if (!ISSET(sih->sih_flags, SI_MPSAFE))
+                       KERNEL_LOCK();
+
                (*sih->sih_fn)(sih->sih_arg);
+
+               if (!ISSET(sih->sih_flags, SI_MPSAFE))
+                       KERNEL_UNLOCK();
+
+               mtx_enter(&si->softintr_lock);
+               si->softintr_running = NULL;
+               uvmexp.softs++;
        }
-       KERNEL_UNLOCK();
+       mtx_leave(&si->softintr_lock);
 
        ci->ci_handled_intr_level = floor;
 }
 
 /*
- * softintr_establish:         [interface]
+ * softintr_establish_flags:   [interface]
  *
  *     Register a software interrupt handler.
  */
 void *
-softintr_establish(int ipl, void (*func)(void *), void *arg)
+softintr_establish_flags(int ipl, void (*func)(void *), void *arg, u_int flags)
 {
        struct x86_soft_intr *si;
        struct x86_soft_intrhand *sih;
@@ -137,6 +141,9 @@ softintr_establish(int ipl, void (*func)
                panic("softintr_establish");
        }
 
+       if (ISSET(flags, ~SI_FLAG_MASK))
+               panic("%s: invalid flags: %x", __func__, flags);
+
        si = &x86_soft_intrs[which];
 
        sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
@@ -145,10 +152,17 @@ softintr_establish(int ipl, void (*func)
                sih->sih_fn = func;
                sih->sih_arg = arg;
                sih->sih_pending = 0;
+               sih->sih_flags = flags;
        }
        return (sih);
 }
 
+void *
+softintr_establish(int ipl, void (*func)(void *), void *arg)
+{
+       return softintr_establish_flags(ipl, func, arg, 0);
+}
+
 /*
  * softintr_disestablish:      [interface]
  *
@@ -160,10 +174,16 @@ softintr_disestablish(void *arg)
        struct x86_soft_intrhand *sih = arg;
        struct x86_soft_intr *si = sih->sih_intrhead;
 
+again:
        mtx_enter(&si->softintr_lock);
        if (sih->sih_pending) {
                TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
                sih->sih_pending = 0;
+       } else if (si->softintr_running == sih) {
+               mtx_leave(&si->softintr_lock);
+               while (si->softintr_running == sih)
+                       CPU_BUSY_CYCLE();
+               goto again;
        }
        mtx_leave(&si->softintr_lock);
 
Index: include/intr.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/intr.h,v
retrieving revision 1.32
diff -u -p -r1.32 intr.h
--- include/intr.h      17 Jun 2020 06:14:52 -0000      1.32
+++ include/intr.h      19 May 2021 21:53:02 -0000
@@ -240,15 +240,21 @@ struct x86_soft_intrhand {
        void    (*sih_fn)(void *);
        void    *sih_arg;
        int     sih_pending;
+       u_int   sih_flags;
 };
 
+#define SI_MPSAFE      0x01
+#define SI_FLAG_MASK   0x01
+
 struct x86_soft_intr {
        TAILQ_HEAD(, x86_soft_intrhand)
                        softintr_q;
+       volatile struct x86_soft_intrhand *softintr_running;
        int             softintr_ssir;
        struct mutex    softintr_lock;
 };
 
+void   *softintr_establish_flags(int, void (*)(void *), void *, u_int);
 void   *softintr_establish(int, void (*)(void *), void *);
 void   softintr_disestablish(void *);
 void   softintr_init(void);
