On Fri, Dec 24, 2021 at 12:50:23PM +0100, Alexander Bluhm wrote:
> On Fri, Dec 24, 2021 at 04:16:28PM +0900, YASUOKA Masahiko wrote:
> > > - npppd l2tp ipsecflowinfo is not MP safe
> > 
> > Does this refer to the things we are discussing in the "Fix
> > ipsp_spd_lookup() for transport mode" thread?  I wonder if there is
> > another issue.
> 
> In this mail thread I was concerned that things might get worse.
> 
> Currently I see these problems:
> 
> tdb_free() will be called with a shared netlock.  From there
> ipsp_ids_free() is called.
> 
>         if (--ids->id_refcount > 0)
>                 return;
> 
> This ref count needs to be atomic.
> 
>         if (LIST_EMPTY(&ipsp_ids_gc_list))
>                 timeout_add_sec(&ipsp_ids_gc_timeout, 1);
>         LIST_INSERT_HEAD(&ipsp_ids_gc_list, ids, id_gc_list);
> 
> And some mutex should protect ipsp_ids_gc_list.
> 
> bluhm
> 

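Right.  To illustrate the first problem: with only a shared netlock two
CPUs can run the `--ids->id_refcount' read-modify-write cycle
concurrently and a decrement can be lost.  Below is a self-contained
userland sketch of the pattern, using C11 atomics in place of
atomic_dec_int_nv(9); all names in it are made up for the demo:

/*
 * Two threads drop one reference each on a refcount of 2.
 * atomic_fetch_sub() returns the previous value, so exactly one
 * thread observes the count reaching zero and "schedules the gc".
 * With a plain `--refcount' under a shared lock both threads could
 * read 2 and write back 1, leaking the object.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint refcount = 2;

static void *
drop_ref(void *arg)
{
	(void)arg;
	if (atomic_fetch_sub(&refcount, 1) == 1)
		printf("last reference dropped, schedule gc\n");
	return NULL;
}

int
main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, drop_ref, NULL);
	pthread_create(&t2, NULL, drop_ref, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
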
The diff below adds an `ipsec_flows_mtx' mutex(9) to protect the
`ipsp_ids_*' list and trees.  ipsp_ids_lookup() now returns `ids' with
its reference counter bumped, so callers must release it with
ipsp_ids_free(), which has been made NULL-safe for that purpose.
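
The deferred-free side then only needs the mutex around the list
manipulation.  Schematically, as a userland sketch with invented names
(the real ipsp_ids_free()/ipsp_ids_gc() additionally keep the
`id_gc_ttl' countdown and arm a timeout instead of running the gc
inline; LIST_FOREACH_SAFE is the <sys/queue.h> macro as on OpenBSD):

/*
 * Deferred free: the last reference holder links the object onto a
 * gc list under the mutex; a later gc pass unlinks and frees it
 * under the same mutex.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>

struct obj {
	LIST_ENTRY(obj)	o_gc_list;
};

static LIST_HEAD(, obj) gc_list = LIST_HEAD_INITIALIZER(gc_list);
static pthread_mutex_t gc_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
obj_release(struct obj *o)
{
	pthread_mutex_lock(&gc_mtx);
	/* the kernel version also arms the gc timeout here */
	LIST_INSERT_HEAD(&gc_list, o, o_gc_list);
	pthread_mutex_unlock(&gc_mtx);
}

static void
obj_gc(void)
{
	struct obj *o, *t;

	pthread_mutex_lock(&gc_mtx);
	LIST_FOREACH_SAFE(o, &gc_list, o_gc_list, t) {
		LIST_REMOVE(o, o_gc_list);
		free(o);
	}
	pthread_mutex_unlock(&gc_mtx);
}

int
main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o != NULL)
		obj_release(o);
	obj_gc();
	return 0;
}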

Index: sys/netinet/ip_ipsp.c
===================================================================
RCS file: /cvs/src/sys/netinet/ip_ipsp.c,v
retrieving revision 1.267
diff -u -p -r1.267 ip_ipsp.c
--- sys/netinet/ip_ipsp.c       20 Dec 2021 15:59:09 -0000      1.267
+++ sys/netinet/ip_ipsp.c       25 Dec 2021 18:34:22 -0000
@@ -47,6 +47,8 @@
 #include <sys/kernel.h>
 #include <sys/timeout.h>
 #include <sys/pool.h>
+#include <sys/atomic.h>
+#include <sys/mutex.h>
 
 #include <net/if.h>
 #include <net/route.h>
@@ -84,6 +86,13 @@ void tdb_hashstats(void);
        do { } while (0)
 #endif
 
+/*
+ * Locks used to protect global data and struct members:
+ *     F       ipsec_flows_mtx
+ */
+
+struct mutex ipsec_flows_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
+
 int            tdb_rehash(void);
 void           tdb_timeout(void *);
 void           tdb_firstuse(void *);
@@ -98,16 +107,16 @@ int ipsec_ids_idle = 100;          /* keep free 
 struct pool tdb_pool;
 
 /* Protected by the NET_LOCK(). */
-u_int32_t ipsec_ids_next_flow = 1;     /* may not be zero */
-struct ipsec_ids_tree ipsec_ids_tree;
-struct ipsec_ids_flows ipsec_ids_flows;
+u_int32_t ipsec_ids_next_flow = 1;             /* [F] may not be zero */
+struct ipsec_ids_tree ipsec_ids_tree;          /* [F] */
+struct ipsec_ids_flows ipsec_ids_flows;                /* [F] */
 struct ipsec_policy_head ipsec_policy_head =
     TAILQ_HEAD_INITIALIZER(ipsec_policy_head);
 
 void ipsp_ids_gc(void *);
 
 LIST_HEAD(, ipsec_ids) ipsp_ids_gc_list =
-    LIST_HEAD_INITIALIZER(ipsp_ids_gc_list);
+    LIST_HEAD_INITIALIZER(ipsp_ids_gc_list);   /* [F] */
 struct timeout ipsp_ids_gc_timeout =
     TIMEOUT_INITIALIZER_FLAGS(ipsp_ids_gc, NULL, TIMEOUT_PROC);
 
@@ -1191,21 +1200,25 @@ ipsp_ids_insert(struct ipsec_ids *ids)
        struct ipsec_ids *found;
        u_int32_t start_flow;
 
-       NET_ASSERT_LOCKED();
+       mtx_enter(&ipsec_flows_mtx);
 
        found = RBT_INSERT(ipsec_ids_tree, &ipsec_ids_tree, ids);
        if (found) {
                /* if refcount was zero, then timeout is running */
-               if (found->id_refcount++ == 0) {
+               if (atomic_inc_int_nv(&found->id_refcount) == 1) {
                        LIST_REMOVE(found, id_gc_list);
 
                        if (LIST_EMPTY(&ipsp_ids_gc_list))
                                timeout_del(&ipsp_ids_gc_timeout);
                }
+               mtx_leave(&ipsec_flows_mtx);
                DPRINTF("ids %p count %d", found, found->id_refcount);
                return found;
        }
+
+       ids->id_refcount = 1;
        ids->id_flow = start_flow = ipsec_ids_next_flow;
+
        if (++ipsec_ids_next_flow == 0)
                ipsec_ids_next_flow = 1;
        while (RBT_INSERT(ipsec_ids_flows, &ipsec_ids_flows, ids) != NULL) {
@@ -1214,12 +1227,13 @@ ipsp_ids_insert(struct ipsec_ids *ids)
                        ipsec_ids_next_flow = 1;
                if (ipsec_ids_next_flow == start_flow) {
                        RBT_REMOVE(ipsec_ids_tree, &ipsec_ids_tree, ids);
+                       mtx_leave(&ipsec_flows_mtx);
                        DPRINTF("ipsec_ids_next_flow exhausted %u",
-                           ipsec_ids_next_flow);
+                           start_flow);
                        return NULL;
                }
        }
-       ids->id_refcount = 1;
+       mtx_leave(&ipsec_flows_mtx);
        DPRINTF("new ids %p flow %u", ids, ids->id_flow);
        return ids;
 }
@@ -1228,11 +1242,16 @@ struct ipsec_ids *
 ipsp_ids_lookup(u_int32_t ipsecflowinfo)
 {
        struct ipsec_ids        key;
-
-       NET_ASSERT_LOCKED();
+       struct ipsec_ids        *ids;
 
        key.id_flow = ipsecflowinfo;
-       return RBT_FIND(ipsec_ids_flows, &ipsec_ids_flows, &key);
+
+       mtx_enter(&ipsec_flows_mtx);
+       ids = RBT_FIND(ipsec_ids_flows, &ipsec_ids_flows, &key);
+       if (ids != NULL)
+               atomic_inc_int(&ids->id_refcount);
+       mtx_leave(&ipsec_flows_mtx);
+       return ids;
 }
 
 /* free ids only from delayed timeout */
@@ -1241,7 +1260,7 @@ ipsp_ids_gc(void *arg)
 {
        struct ipsec_ids *ids, *tids;
 
-       NET_LOCK();
+       mtx_enter(&ipsec_flows_mtx);
 
        LIST_FOREACH_SAFE(ids, &ipsp_ids_gc_list, id_gc_list, tids) {
                KASSERT(ids->id_refcount == 0);
@@ -1261,14 +1280,15 @@ ipsp_ids_gc(void *arg)
        if (!LIST_EMPTY(&ipsp_ids_gc_list))
                timeout_add_sec(&ipsp_ids_gc_timeout, 1);
 
-       NET_UNLOCK();
+       mtx_leave(&ipsec_flows_mtx);
 }
 
 /* decrements refcount, actual free happens in gc */
 void
 ipsp_ids_free(struct ipsec_ids *ids)
 {
-       NET_ASSERT_LOCKED();
+       if (ids == NULL)
+               return;
 
        /*
         * If the refcount becomes zero, then a timeout is started. This
@@ -1277,9 +1297,11 @@ ipsp_ids_free(struct ipsec_ids *ids)
        DPRINTF("ids %p count %d", ids, ids->id_refcount);
        KASSERT(ids->id_refcount > 0);
 
-       if (--ids->id_refcount > 0)
+       if (atomic_dec_int_nv(&ids->id_refcount) > 0)
                return;
 
+       mtx_enter(&ipsec_flows_mtx);
+
        /*
-        * Add second for the case ipsp_ids_gc() is already running and
-        * awaits netlock to be released.
+        * Add a second for the case ipsp_ids_gc() is already running
+        * and awaits the mutex to be released.
@@ -1289,6 +1311,8 @@ ipsp_ids_free(struct ipsec_ids *ids)
        if (LIST_EMPTY(&ipsp_ids_gc_list))
                timeout_add_sec(&ipsp_ids_gc_timeout, 1);
        LIST_INSERT_HEAD(&ipsp_ids_gc_list, ids, id_gc_list);
+
+       mtx_leave(&ipsec_flows_mtx);
 }
 
 static int
Index: sys/netinet/ip_ipsp.h
===================================================================
RCS file: /cvs/src/sys/netinet/ip_ipsp.h,v
retrieving revision 1.233
diff -u -p -r1.233 ip_ipsp.h
--- sys/netinet/ip_ipsp.h       20 Dec 2021 15:59:10 -0000      1.233
+++ sys/netinet/ip_ipsp.h       25 Dec 2021 18:34:22 -0000
@@ -40,6 +40,14 @@
 #ifndef _NETINET_IPSP_H_
 #define _NETINET_IPSP_H_
 
+/*
+ * Locks used to protect struct members in this file:
+ *     I       Immutable after creation
+ *     F       ipsec_flows_mtx
+ *     a       atomic
+ *     p       ipo_tdb_mtx             link policy to TDB global mutex
+ */
+
 /* IPSP global definitions. */
 
 #include <sys/types.h>
@@ -223,14 +231,14 @@ struct ipsec_id {
 };
 
 struct ipsec_ids {
-       LIST_ENTRY(ipsec_ids)   id_gc_list;
-       RBT_ENTRY(ipsec_ids)    id_node_id;
-       RBT_ENTRY(ipsec_ids)    id_node_flow;
-       struct ipsec_id         *id_local;
-       struct ipsec_id         *id_remote;
-       u_int32_t               id_flow;
-       int                     id_refcount;
-       u_int                   id_gc_ttl;
+       LIST_ENTRY(ipsec_ids)   id_gc_list;     /* [F] */
+       RBT_ENTRY(ipsec_ids)    id_node_id;     /* [F] */
+       RBT_ENTRY(ipsec_ids)    id_node_flow;   /* [F] */
+       struct ipsec_id         *id_local;      /* [I] */
+       struct ipsec_id         *id_remote;     /* [I] */
+       u_int32_t               id_flow;        /* [I] */
+       u_int                   id_refcount;    /* [a] */
+       u_int                   id_gc_ttl;      /* [F] */
 };
 RBT_HEAD(ipsec_ids_flows, ipsec_ids);
 RBT_HEAD(ipsec_ids_tree, ipsec_ids);
@@ -246,10 +254,6 @@ struct ipsec_acquire {
        TAILQ_ENTRY(ipsec_acquire)      ipa_next;
 };
 
-/*
- * Locks used to protect struct members in this file:
- *     p       ipo_tdb_mtx             link policy to TDB global mutex
- */
 struct ipsec_policy {
        struct radix_node       ipo_nodes[2];   /* radix tree glue */
        struct sockaddr_encap   ipo_addr;
Index: sys/netinet/ip_spd.c
===================================================================
RCS file: /cvs/src/sys/netinet/ip_spd.c,v
retrieving revision 1.110
diff -u -p -r1.110 ip_spd.c
--- sys/netinet/ip_spd.c        16 Dec 2021 15:38:03 -0000      1.110
+++ sys/netinet/ip_spd.c        25 Dec 2021 18:34:22 -0000
@@ -418,6 +418,7 @@ ipsp_spd_lookup(struct mbuf *m, int af, 
                        /* Cached entry is good. */
                        error = ipsp_spd_inp(m, inp, ipo, tdbout);
                        mtx_leave(&ipo_tdb_mtx);
+                       ipsp_ids_free(ids);
                        return error;
 
   nomatchout:
@@ -452,6 +453,7 @@ ipsp_spd_lookup(struct mbuf *m, int af, 
                            dignore ? &sdst : &ipo->ipo_dst,
                            ipo->ipo_sproto, ids ? ids: ipo->ipo_ids,
                            &ipo->ipo_addr, &ipo->ipo_mask);
+                       ipsp_ids_free(ids);
                        mtx_enter(&ipo_tdb_mtx);
                        if ((tdbp_new != NULL) &&
                            (tdbp_new->tdb_flags & TDBF_DELETED)) {

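Note the ip_spd.c chunks above: each reference taken by
ipsp_ids_lookup() in ipsp_spd_lookup() is now dropped again with
ipsp_ids_free() once the cached entry or the freshly gathered TDB has
been handled.  Since `ids' may be NULL on those paths, ipsp_ids_free()
was taught to accept NULL.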