Hi,

Let me update the diff.  The previous diff had a problem in
ipsp_spd_lookup(), which used "rn" without initializing it.

On Sat, 20 Nov 2021 21:44:20 +0900 (JST)
YASUOKA Masahiko <yasu...@openbsd.org> wrote:
> On Wed, 12 May 2021 19:11:09 +0900 (JST)
> YASUOKA Masahiko <yasu...@openbsd.org> wrote:
>> Radek reported a problem to misc@ that multiple Windows clients behind
>> a NAT cannot use an L2TP/IPsec server simultaneously.
>> 
>> https://marc.info/?t=160996816100001&r=1&w=2
>> 
>> There are two problems.  The first is that pipex(4) doesn't pass the
>> proper ipsecflowinfo to ip_output().  The second is that the IPsec
>> policy check done by ipsp_spd_lookup() returns -1 (EINVAL) if the
>> given tdb is not cached.  This happens when its flow is shared by
>> another tdb (for another client behind the same NAT).
> 
> This problem is not fixed yet.  The diff for the second problem was
> never committed.  It fixed the check in ipsp_spd_lookup() by making an
> IPsec policy keep a list of IDs.
> 
> Also, my colleague Kawai pointed out another problem when there is a
> Linux client along with the Windows clients behind a NAT.  Windows
> uses 1701/udp for its local ID, but Linux uses ANY/udp for its local
> ID.
> 
> In that situation, the policies overlap:
> 
>   (a) Windows:  REMOTE_IP:1701/udp <=> LOCAL_IP:1701/udp
>   (b) Linux:    REMOTE_IP:ANY/udp  <=> LOCAL_IP:1701/udp
>   
> Since we use a radix tree for the policies and rn_match() does a best
> match, (b) is never selected when a policy is looked up by the flow.
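
To illustrate, here is a small userland sketch.  It is not the kernel
radix code: the struct, the port-mask handling, and "largest mask wins"
are simplified stand-ins for rn_match()'s best match.

/*
 * Userland model of the two overlapping policies above, reduced to the
 * remote port.  (a) is more specific than (b).
 */
#include <stdio.h>

struct flow_policy {
	const char	*name;
	unsigned short	 rport;		/* remote port, 0 for ANY */
	unsigned short	 rport_mask;	/* 0xffff exact, 0x0000 ANY */
};

static const struct flow_policy spd[] = {
	{ "(a) Windows REMOTE_IP:1701/udp <=> LOCAL_IP:1701/udp", 1701, 0xffff },
	{ "(b) Linux   REMOTE_IP:ANY/udp  <=> LOCAL_IP:1701/udp",    0, 0x0000 },
};

/* Best match by flow, like rn_match(): the most specific entry wins. */
static const struct flow_policy *
best_match(unsigned short rport)
{
	const struct flow_policy *best = NULL;
	size_t i;

	for (i = 0; i < sizeof(spd) / sizeof(spd[0]); i++) {
		if ((rport & spd[i].rport_mask) != spd[i].rport)
			continue;
		if (best == NULL || spd[i].rport_mask > best->rport_mask)
			best = &spd[i];
	}
	return best;
}

/* Exact match, like rn_lookup() keyed by an SA's filter/filtermask. */
static const struct flow_policy *
exact_match(unsigned short rport, unsigned short rport_mask)
{
	size_t i;

	for (i = 0; i < sizeof(spd) / sizeof(spd[0]); i++)
		if (spd[i].rport == rport && spd[i].rport_mask == rport_mask)
			return &spd[i];
	return NULL;
}

int
main(void)
{
	/* The Linux client's packets are still 1701/udp on the wire, so
	 * a best match by flow always returns (a), never (b). */
	printf("best match:  %s\n", best_match(1701)->name);

	/* Keying the lookup by the Linux SA's own ANY/udp filter finds (b). */
	printf("exact match: %s\n", exact_match(0, 0x0000)->name);
	return 0;
}

Running it prints (a) for the flow-based best match and (b) only for the
exact lookup, which is why the diff switches to rn_lookup() keyed by the
SA's tdb_filter/tdb_filtermask once the tdb is known.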
> 
> Let me update the diff.
> 
> As for incoming packets, we know which tdb was used.  The diff uses
> the tdb to find the proper policy.
> 
> As for outgoing packets, there is no way to select the proper policy
> other than using "ipsecflowinfo".  So only when "ipsecflowinfo" is
> used, we get a tdb from the packet flow and the IDs (retrieved through
> the ipsecflowinfo), and then we can find the proper policy by that
> tdb.
> 
> Also, the diff skips the ID check against the policy only if the SA
> is in transport mode and uses NAT-T.  Since a transport-mode policy
> with NAT-T is shared by multiple clients that have different IDs,
> checking the IDs is difficult, and I think the other checks are
> enough.
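
Said another way, the rule the diff implements for that check boils down
to the following standalone sketch.  The TDBF_* numeric values here are
placeholders; only the flag names follow the kernel's ip_ipsp.h.

#include <stdio.h>

#define TDBF_TUNNELING	0x0100		/* placeholder value */
#define TDBF_UDPENCAP	0x0200		/* placeholder value */

/* Return 1 when the per-policy ID check should still be done. */
static int
ids_check_required(unsigned int tdb_flags)
{
	/*
	 * Transport mode (no TDBF_TUNNELING) with NAT-T (TDBF_UDPENCAP):
	 * one policy is shared by clients with different IDs, so skip
	 * the ID check and rely on the remaining checks.
	 */
	if ((tdb_flags & TDBF_TUNNELING) == 0 &&
	    (tdb_flags & TDBF_UDPENCAP) != 0)
		return 0;
	return 1;
}

int
main(void)
{
	printf("transport + NAT-T: %d\n", ids_check_required(TDBF_UDPENCAP));
	printf("tunnel mode:       %d\n", ids_check_required(TDBF_TUNNELING));
	return 0;
}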
> 
> ok?  comments?
> 
> Fix some problems when accepting IPsec transport mode connections from
> multiple clients behind a NAT.  In that situation, policies can
> overlap, but previously a proper policy could not be chosen for either
> incoming or outgoing packets.  To solve this, for incoming packets use
> tdb->tdb_filter{,mask} to find the proper policy, and for outgoing
> packets find the tdb by the given ipsecflowinfo and use it to find the
> policy.  Also skip checking the IDs of the policy, since a policy is
> shared by multiple clients in this situation.

Index: sys/netinet/ip_ipsp.c
===================================================================
RCS file: /disk/cvs/openbsd/src/sys/netinet/ip_ipsp.c,v
retrieving revision 1.258
diff -u -p -r1.258 ip_ipsp.c
--- sys/netinet/ip_ipsp.c       29 Nov 2021 19:19:00 -0000      1.258
+++ sys/netinet/ip_ipsp.c       30 Nov 2021 04:44:48 -0000
@@ -90,6 +90,8 @@ void          tdb_firstuse(void *);
 void           tdb_soft_timeout(void *);
 void           tdb_soft_firstuse(void *);
 int            tdb_hash(u_int32_t, union sockaddr_union *, u_int8_t);
+int            sockaddr_encap_match(struct sockaddr_encap *,
+                   struct sockaddr_encap *, struct sockaddr_encap *);
 
 int ipsec_in_use = 0;
 u_int64_t ipsec_last_added = 0;
@@ -507,6 +509,76 @@ gettdbbysrc(u_int rdomain, union sockadd
        tdb_ref(tdbp);
        mtx_leave(&tdb_sadb_mtx);
        return tdbp;
+}
+
+/*
+ * Get an SA given the flow, the direction, the security protocol type, and
+ * the desired IDs.
+ */
+struct tdb *
+gettdbbyflow(u_int rdomain, int direction, struct sockaddr_encap *senflow,
+    u_int8_t sproto, struct ipsec_ids *ids)
+{
+       u_int32_t hashval;
+       struct tdb *tdbp;
+       union sockaddr_union srcdst;
+
+       if (ids == NULL)        /* ids is mandatory */
+               return NULL;
+
+       memset(&srcdst, 0, sizeof(srcdst));
+       switch (senflow->sen_type) {
+       case SENT_IP4:
+               srcdst.sin.sin_len = sizeof(srcdst.sin);
+               srcdst.sin.sin_family = AF_INET;
+               if (direction == IPSP_DIRECTION_OUT)
+                       srcdst.sin.sin_addr = senflow->Sen.Sip4.Dst;
+               else
+                       srcdst.sin.sin_addr = senflow->Sen.Sip4.Src;
+               break;
+       case SENT_IP6:
+               srcdst.sin6.sin6_len = sizeof(srcdst.sin6);
+               srcdst.sin6.sin6_family = AF_INET6;
+               if (direction == IPSP_DIRECTION_OUT)
+                       srcdst.sin6.sin6_addr = senflow->Sen.Sip6.Dst;
+               else
+                       srcdst.sin6.sin6_addr = senflow->Sen.Sip6.Src;
+               break;
+       }
+
+       mtx_enter(&tdb_sadb_mtx);
+       hashval = tdb_hash(0, &srcdst, sproto);
+
+       for (tdbp = tdbdst[hashval]; tdbp != NULL; tdbp = tdbp->tdb_dnext)
+               if (tdbp->tdb_sproto == sproto &&
+                   tdbp->tdb_rdomain == rdomain &&
+                   (tdbp->tdb_flags & TDBF_INVALID) == 0 &&
+                   ((direction == IPSP_DIRECTION_OUT &&
+                   !memcmp(&tdbp->tdb_dst, &srcdst, srcdst.sa.sa_len)) ||
+                   (direction == IPSP_DIRECTION_IN &&
+                   !memcmp(&tdbp->tdb_src, &srcdst, srcdst.sa.sa_len)))) {
+                       if (sockaddr_encap_match(&tdbp->tdb_filter,
+                           &tdbp->tdb_filtermask, senflow))
+                               break;
+               }
+
+       mtx_leave(&tdb_sadb_mtx);
+       return tdbp;
+}
+
+int
+sockaddr_encap_match(struct sockaddr_encap *addr, struct sockaddr_encap *mask,
+    struct sockaddr_encap *dest)
+{
+       size_t  off;
+
+       for (off = offsetof(struct sockaddr_encap, sen_type);
+           off < dest->sen_len; off++) {
+               if ((*((u_char *)addr + off) & *((u_char *)mask + off)) !=
+                   (*((u_char *)dest + off) & *((u_char *)mask + off)))
+                       break;
+       }
+       return (off < dest->sen_len) ? 0 : 1;
 }
 
 #ifdef DDB
Index: sys/netinet/ip_ipsp.h
===================================================================
RCS file: /disk/cvs/openbsd/src/sys/netinet/ip_ipsp.h,v
retrieving revision 1.223
diff -u -p -r1.223 ip_ipsp.h
--- sys/netinet/ip_ipsp.h       26 Nov 2021 16:16:35 -0000      1.223
+++ sys/netinet/ip_ipsp.h       30 Nov 2021 04:44:48 -0000
@@ -565,6 +565,8 @@ struct      tdb *gettdbbysrcdst_dir(u_int, u_
                union sockaddr_union *, u_int8_t, int);
 #define gettdbbysrcdst(a,b,c,d,e) gettdbbysrcdst_dir((a),(b),(c),(d),(e),0)
 #define gettdbbysrcdst_rev(a,b,c,d,e) gettdbbysrcdst_dir((a),(b),(c),(d),(e),1)
+struct tdb *gettdbbyflow(u_int, int, struct sockaddr_encap *, u_int8_t,
+               struct ipsec_ids *);
 void   puttdb(struct tdb *);
 void   puttdb_locked(struct tdb *);
 void   tdb_delete(struct tdb *);
Index: sys/netinet/ip_spd.c
===================================================================
RCS file: /disk/cvs/openbsd/src/sys/netinet/ip_spd.c,v
retrieving revision 1.105
diff -u -p -r1.105 ip_spd.c
--- sys/netinet/ip_spd.c        25 Nov 2021 13:46:02 -0000      1.105
+++ sys/netinet/ip_spd.c        30 Nov 2021 04:44:48 -0000
@@ -147,7 +147,7 @@ ipsp_spd_lookup(struct mbuf *m, int af, 
     struct tdb *tdbp, struct inpcb *inp, u_int32_t ipsecflowinfo)
 {
        struct radix_node_head *rnh;
-       struct radix_node *rn;
+       struct radix_node *rn = NULL;
        union sockaddr_union sdst, ssrc;
        struct sockaddr_encap *ddst, dst;
        struct ipsec_policy *ipo;
@@ -177,6 +177,8 @@ ipsp_spd_lookup(struct mbuf *m, int af, 
                return NULL;
        }
 
+       if (ipsecflowinfo != 0)
+               ids = ipsp_ids_lookup(ipsecflowinfo);
        memset(&dst, 0, sizeof(dst));
        memset(&sdst, 0, sizeof(union sockaddr_union));
        memset(&ssrc, 0, sizeof(union sockaddr_union));
@@ -299,9 +301,28 @@ ipsp_spd_lookup(struct mbuf *m, int af, 
                return NULL;
        }
 
+       /*
+        * When there are multiple clients behind a NAT, policies can be
+        * overlapped.  When it happens it's impossible to find a correct one
+        * only by the flow.  Use ipsecflowinfo and get an SA first to solve
+        * the situation.
+        */
+       if (ipsecflowinfo != 0 && ids != NULL) {
+               KASSERT(tdbp == NULL);
+               KASSERT(direction == IPSP_DIRECTION_OUT);
+               tdbp = gettdbbyflow(rdomain, direction, &dst, IPPROTO_ESP, ids);
+       }
+
        /* Actual SPD lookup. */
-       if ((rnh = spd_table_get(rdomain)) == NULL ||
-           (rn = rn_match((caddr_t)&dst, rnh)) == NULL) {
+       rnh = spd_table_get(rdomain);
+       if (rnh != NULL) {
+               if (tdbp != NULL)
+                       rn = rn_lookup((caddr_t)&tdbp->tdb_filter,
+                           (caddr_t)&tdbp->tdb_filtermask, rnh);
+               else
+                       rn = rn_match((caddr_t)&dst, rnh);
+       }
+       if (rn == NULL) {
                /*
                 * Return whatever the socket requirements are, there are no
                 * system-wide policies.
@@ -396,9 +417,6 @@ ipsp_spd_lookup(struct mbuf *m, int af, 
                        }
                }
 
-               if (ipsecflowinfo)
-                       ids = ipsp_ids_lookup(ipsecflowinfo);
-
                /* Check that the cached TDB (if present), is appropriate. */
                if (ipo->ipo_tdb != NULL) {
                        if ((ipo->ipo_last_searched <= ipsec_last_added) ||
@@ -517,10 +535,19 @@ ipsp_spd_lookup(struct mbuf *m, int af, 
                                goto nomatchin;
 
                        /* Match source/dest IDs. */
-                       if (ipo->ipo_ids)
-                               if (tdbp->tdb_ids == NULL ||
-                                   !ipsp_ids_match(ipo->ipo_ids, tdbp->tdb_ids))
+                       if (ipo->ipo_ids != NULL) {
+                               if ((tdbp->tdb_flags & TDBF_TUNNELING) == 0 &&
+                                   (tdbp->tdb_flags & TDBF_UDPENCAP) != 0) {
+                                       /*
+                                        * Skip IDs check for transport mode
+                                        * with NAT-T.  Multiple clients (IDs)
+                                        * can use a same policy.
+                                        */
+                               } else if (tdbp->tdb_ids == NULL ||
+                                   !ipsp_ids_match(ipo->ipo_ids,
+                                   tdbp->tdb_ids))
                                        goto nomatchin;
+                       }
 
                        /* Add it to the cache. */
                        if (ipo->ipo_tdb != NULL) {
