We can't predict the netlock state when the pipex(4) related
(*if_qstart)() handlers are called. This means we can't use the netlock
within the pppac_qstart() and pppx_if_qstart() handlers. However, the
netlock is actually unavoidable only at the point where we call
(*if_output)() in the pipex(4) PPPOE output path.

Introduce a `pipexoutq' mbuf(9) queue and put the PPPOE related mbufs
in it. The (*if_output)() calls are then performed from the netisr
handler with the netlock held.

The netlock assertion is still kept within pppac_qstart(),
pppx_if_qstart() and the underlying code because some per-session data
relies on the netlock. These assertions will be removed in following
diffs.

I also want to use an mbuf(9) queue for the pppoe(4) input path and
remove the existing serialization hack provided by the kernel lock.

Index: sys/net/if.c
===================================================================
RCS file: /cvs/src/sys/net/if.c,v
retrieving revision 1.653
diff -u -p -r1.653 if.c
--- sys/net/if.c        7 Jun 2022 22:18:34 -0000       1.653
+++ sys/net/if.c        27 Jun 2022 12:53:37 -0000
@@ -914,6 +914,11 @@ if_netisr(void *unused)
                if (n & (1 << NETISR_BRIDGE))
                        bridgeintr();
 #endif
+#ifdef PIPEX
+               if (n & (1 << NETISR_PIPEX))
+                       pipexintr();
+#endif
+
                t |= n;
        }
 
Index: sys/net/netisr.h
===================================================================
RCS file: /cvs/src/sys/net/netisr.h,v
retrieving revision 1.56
diff -u -p -r1.56 netisr.h
--- sys/net/netisr.h    28 Apr 2022 16:56:39 -0000      1.56
+++ sys/net/netisr.h    27 Jun 2022 12:53:37 -0000
@@ -45,6 +45,7 @@
 #define        NETISR_PFSYNC   5               /* for pfsync "immediate" tx */
 #define        NETISR_ARP      18              /* same as AF_LINK */
 #define        NETISR_IPV6     24              /* same as AF_INET6 */
+#define NETISR_PIPEX   27              /* for pipex processing */
 #define        NETISR_PPP      28              /* for PPP processing */
 #define        NETISR_BRIDGE   29              /* for bridge processing */
 #define        NETISR_SWITCH   31              /* for switch dataplane */
@@ -65,6 +66,7 @@ void  pppintr(void);
 void   bridgeintr(void);
 void   switchintr(void);
 void   pfsyncintr(void);
+void   pipexintr(void);
 
 #define        schednetisr(anisr)                                              
\
 do {                                                                   \
Index: sys/net/pipex.c
===================================================================
RCS file: /cvs/src/sys/net/pipex.c,v
retrieving revision 1.141
diff -u -p -r1.141 pipex.c
--- sys/net/pipex.c     26 Jun 2022 22:51:58 -0000      1.141
+++ sys/net/pipex.c     27 Jun 2022 12:53:37 -0000
@@ -52,6 +52,7 @@
 #include <net/route.h>
 #include <net/ppp_defs.h>
 #include <net/ppp-comp.h>
+#include <net/netisr.h>
 
 #include "pf.h"
 #if NPF > 0
@@ -106,6 +107,9 @@ struct radix_node_head      *pipex_rd_head6 =
 struct timeout pipex_timer_ch;         /* callout timer context */
 int pipex_prune = 1;                   /* [I] walk list every seconds */
 
+struct mbuf_queue pipexoutq = MBUF_QUEUE_INITIALIZER(
+    IFQ_MAXLEN, IPL_SOFTNET);
+
 /* borrow an mbuf pkthdr field */
 #define ph_ppp_proto ether_vtag
 
@@ -194,6 +198,46 @@ pipex_ioctl(void *ownersc, u_long cmd, c
 }
 
 /************************************************************************
+ * Software Interrupt Handler
+ ************************************************************************/
+
+void
+pipexintr(void)
+{
+       struct mbuf_list ml;
+       struct mbuf *m;
+       struct pipex_session *session;
+
+       NET_ASSERT_LOCKED();
+
+       mq_delist(&pipexoutq, &ml);
+
+       while ((m = ml_dequeue(&ml)) != NULL) {
+               struct ifnet *ifp;
+
+               session = m->m_pkthdr.ph_cookie;
+
+               ifp = if_get(session->proto.pppoe.over_ifidx);
+               if (ifp != NULL) {
+                       struct pipex_pppoe_header *pppoe;
+                       int len;
+
+                       pppoe = mtod(m, struct pipex_pppoe_header *);
+                       len = ntohs(pppoe->length);
+                       ifp->if_output(ifp, m, &session->peer.sa, NULL);
+                       counters_pkt(session->stat_counters, pxc_opackets,
+                           pxc_obytes, len);
+               } else {
+                       m_freem(m);
+                       counters_inc(session->stat_counters, pxc_oerrors);
+               }
+               if_put(ifp);
+
+               pipex_rele_session(session);
+       }
+}
+
+/************************************************************************
  * Session management functions
  ************************************************************************/
 int
@@ -1259,7 +1303,6 @@ Static void
 pipex_pppoe_output(struct mbuf *m0, struct pipex_session *session)
 {
        struct pipex_pppoe_header *pppoe;
-       struct ifnet *ifp;
        int len, padlen;
 
        /* save length for pppoe header */
@@ -1286,18 +1329,15 @@ pipex_pppoe_output(struct mbuf *m0, stru
        pppoe->length = htons(len);
 
        m0->m_pkthdr.ph_ifidx = session->proto.pppoe.over_ifidx;
+       refcnt_take(&session->pxs_refcnt);
+       m0->m_pkthdr.ph_cookie = session;
        m0->m_flags &= ~(M_BCAST|M_MCAST);
 
-       ifp = if_get(session->proto.pppoe.over_ifidx);
-       if (ifp != NULL) {
-               ifp->if_output(ifp, m0, &session->peer.sa, NULL);
-               counters_pkt(session->stat_counters, pxc_opackets,
-                   pxc_obytes, len);
-       } else {
-               m_freem(m0);
+       if (mq_enqueue(&pipexoutq, m0) != 0) {
                counters_inc(session->stat_counters, pxc_oerrors);
-       }
-       if_put(ifp);
+               pipex_rele_session(session);
+       } else
+               schednetisr(NETISR_PIPEX);
 }
 #endif /* PIPEX_PPPOE */
 

Reply via email to