Re: tcp syn cache random reseed

2016-03-29 Thread Martin Pieuchot
On 28/03/16(Mon) 23:56, Alexander Bluhm wrote:
> On Mon, Mar 21, 2016 at 12:58:41PM +0100, Alexander Bluhm wrote:
> > The attack I see is that you can measure the bucket distribution
> > by timing the SYN+ACK response.  You can collect samples that end
> > in the same bucket.  After you have collected enough, start your
> > DoS attack.  I think that just collecting data is also possible
> > with a strong hash function.  With a weak function you may collect
> > less and can start guessing early on top of that.  But reseeding
> > after a number of packets prevents collecting information over a
> > long period.
> 
> The syn cache already detects when it has too many bucket collisions.
> That seems a good moment to reseed the hash function.

Makes sense to me.  The "> 0" check made me wonder.  Can't you simply
use an unsigned variable and always set it to 0?

> ok?

ok mpi@

> Index: netinet/tcp_input.c
> ===
> RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
> retrieving revision 1.316
> diff -u -p -r1.316 tcp_input.c
> --- netinet/tcp_input.c   27 Mar 2016 19:19:01 -  1.316
> +++ netinet/tcp_input.c   28 Mar 2016 21:51:20 -
> @@ -3400,6 +3400,12 @@ syn_cache_insert(struct syn_cache *sc, s
>   if (scp->sch_length >= tcp_syn_bucket_limit) {
>   tcpstat.tcps_sc_bucketoverflow++;
>   /*
> +  * Someone might attack our bucket hash function.  Reseed
> +  * with random as soon as the passive syn cache gets empty.
> +  */
> + if (set->scs_use > 0)
> + set->scs_use = 0;
> + /*
>* The bucket is full.  Toss the oldest element in the
>* bucket.  This will be the first entry in the bucket.
>*/
> 



Re: tcp syn cache random reseed

2016-03-28 Thread Alexander Bluhm
On Mon, Mar 21, 2016 at 12:58:41PM +0100, Alexander Bluhm wrote:
> The attack I see is that you can measure the bucket distribution
> by timing the SYN+ACK response.  You can collect samples that end
> in the same bucket.  After you have collected enough, start your
> DoS attack.  I think that just collecting data is also possible
> with a strong hash function.  With a weak function you may collect
> less and can start guessing early on top of that.  But reseeding
> after a number of packets prevents collecting information over a
> long period.

The syn cache already detects when it has too many bucket collisions.
That seems a good moment to reseed the hash function.

ok?

bluhm

Index: netinet/tcp_input.c
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
retrieving revision 1.316
diff -u -p -r1.316 tcp_input.c
--- netinet/tcp_input.c 27 Mar 2016 19:19:01 -  1.316
+++ netinet/tcp_input.c 28 Mar 2016 21:51:20 -
@@ -3400,6 +3400,12 @@ syn_cache_insert(struct syn_cache *sc, s
if (scp->sch_length >= tcp_syn_bucket_limit) {
tcpstat.tcps_sc_bucketoverflow++;
/*
+* Someone might attack our bucket hash function.  Reseed
+* with random as soon as the passive syn cache gets empty.
+*/
+   if (set->scs_use > 0)
+   set->scs_use = 0;
+   /*
 * The bucket is full.  Toss the oldest element in the
 * bucket.  This will be the first entry in the bucket.
 */



Re: tcp syn cache random reseed

2016-03-28 Thread Alexander Bluhm
On Mon, Mar 21, 2016 at 11:05:25AM +0100, Martin Pieuchot wrote:
> I like it.  Do you think it could be useful to export the value of the
> current active cache set and/or the value of ``tcp_syn_use_limit''?

When the active cache set switches, the reseed counter increments.
It might be useful to see the current fill and use counter, but
that does not fit well in the tcpstat netstat counters.

The tcp_syn_use_limit can be added as sysctl.  Although we don't
like knobs, this one is useful to test the feature and may be handy
to defend against syn-flood in a denial of service condition.  It
is consistent with the existing two syn cache sysctls.

net.inet.tcp.syncachelimit=10255
net.inet.tcp.synbucketlimit=105
net.inet.tcp.synuselimit=10

I moved some declarations to tcp_var.h to access syn_cache_set from
tcp_sysctl().  Note that TCPCTL_VARS had a missing NULL before.

ok?

bluhm

Index: netinet/tcp_input.c
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
retrieving revision 1.316
diff -u -p -r1.316 tcp_input.c
--- netinet/tcp_input.c 27 Mar 2016 19:19:01 -  1.316
+++ netinet/tcp_input.c 28 Mar 2016 21:26:14 -
@@ -3255,19 +3255,12 @@ tcp_mss_adv(struct mbuf *m, int af)
  */
 
 /* syn hash parameters */
-#defineTCP_SYN_HASH_SIZE   293
-#defineTCP_SYN_BUCKET_SIZE 35
 inttcp_syn_cache_size = TCP_SYN_HASH_SIZE;
 inttcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
 inttcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
 inttcp_syn_use_limit = 10;
 
-struct syn_cache_set {
-struct syn_cache_head scs_buckethead[TCP_SYN_HASH_SIZE];
-intscs_count;
-intscs_use;
-u_int32_t  scs_random[5];
-} tcp_syn_cache[2];
+struct syn_cache_set tcp_syn_cache[2];
 int tcp_syn_cache_active;
 
 #define SYN_HASH(sa, sp, dp, rand) \
Index: netinet/tcp_usrreq.c
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_usrreq.c,v
retrieving revision 1.129
diff -u -p -r1.129 tcp_usrreq.c
--- netinet/tcp_usrreq.c23 Mar 2016 15:50:36 -  1.129
+++ netinet/tcp_usrreq.c28 Mar 2016 21:48:23 -
@@ -933,6 +933,23 @@ tcp_sysctl(name, namelen, oldp, oldlenp,
return (sysctl_struct(oldp, oldlenp, newp, newlen,
&tcpstat, sizeof(tcpstat)));
 
+   case TCPCTL_SYN_USE_LIMIT:
+   error = sysctl_int(oldp, oldlenp, newp, newlen,
+   &tcp_syn_use_limit);
+   if (error)
+   return (error);
+   if (newp != NULL) {
+   /*
+* Global tcp_syn_use_limit is used when reseeding a
+* new cache.  Also update the value in active cache.
+*/
+   if (tcp_syn_cache[0].scs_use > tcp_syn_use_limit)
+   tcp_syn_cache[0].scs_use = tcp_syn_use_limit;
+   if (tcp_syn_cache[1].scs_use > tcp_syn_use_limit)
+   tcp_syn_cache[1].scs_use = tcp_syn_use_limit;
+   }
+   return (0);
+
default:
if (name[0] < TCPCTL_MAXID)
return (sysctl_int_arr(tcpctl_vars, name, namelen,
Index: netinet/tcp_var.h
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_var.h,v
retrieving revision 1.111
diff -u -p -r1.111 tcp_var.h
--- netinet/tcp_var.h   27 Mar 2016 19:19:01 -  1.111
+++ netinet/tcp_var.h   28 Mar 2016 21:26:14 -
@@ -251,6 +251,10 @@ struct tcp_opt_info {
 /*
  * Data for the TCP compressed state engine.
  */
+
+#defineTCP_SYN_HASH_SIZE   293
+#defineTCP_SYN_BUCKET_SIZE 35
+
 union syn_cache_sa {
struct sockaddr sa;
struct sockaddr_in sin;
@@ -311,6 +315,13 @@ struct syn_cache_head {
u_short sch_length; /* # entries in bucket */
 };
 
+struct syn_cache_set {
+struct syn_cache_head scs_buckethead[TCP_SYN_HASH_SIZE];
+intscs_count;
+intscs_use;
+u_int32_t  scs_random[5];
+};
+
 #endif /* _KERNEL */
 
 /*
@@ -478,7 +489,8 @@ struct  tcpstat {
 #defineTCPCTL_SACKHOLE_LIMIT  20 /* max entries for tcp sack queues */
 #defineTCPCTL_STATS   21 /* TCP statistics */
 #defineTCPCTL_ALWAYS_KEEPALIVE 22 /* assume SO_KEEPALIVE is always set 
*/
-#defineTCPCTL_MAXID   23
+#defineTCPCTL_SYN_USE_LIMIT   23 /* number of uses before reseeding 
hash */
+#defineTCPCTL_MAXID   24
 
 #defineTCPCTL_NAMES { \
{ 0, 0 }, \
@@ -503,7 +515,8 @@ struct  tcpstat {
{ "drop",   CTLTYPE_STRUCT }, \
{ "sackholelimit",  CTLTYPE_INT

Re: tcp syn cache random reseed

2016-03-24 Thread Ted Unangst
Alexander Bluhm wrote:
> On Sat, Mar 19, 2016 at 10:41:06PM +0100, Alexander Bluhm wrote:
> > The drawback is that the cache lookup has to be done in two syn
> > caches when an ACK arrives.
> 
> This can be prevented most of the time.  Switch the cache only after
> 10 uses.  So most of the time the passive cache is empty and
> then no lookup is done.

agree with the concept.



Re: tcp syn cache random reseed

2016-03-21 Thread Claudio Jeker
On Sun, Mar 20, 2016 at 07:28:45PM +0100, Alexander Bluhm wrote:
> On Sat, Mar 19, 2016 at 10:41:06PM +0100, Alexander Bluhm wrote:
> > Perhaps the tcps_sc_seedrandom counter with a netstat -s line should
> > be committed anyway to show the problem.
> 
> ok?

OK claudio@
 
> bluhm
> 
> Index: sys/netinet/tcp_input.c
> ===
> RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
> retrieving revision 1.314
> diff -u -p -r1.314 tcp_input.c
> --- sys/netinet/tcp_input.c   7 Mar 2016 18:44:00 -   1.314
> +++ sys/netinet/tcp_input.c   19 Mar 2016 20:09:25 -
> @@ -3371,8 +3371,10 @@ syn_cache_insert(struct syn_cache *sc, s
>* If there are no entries in the hash table, reinitialize
>* the hash secrets.
>*/
> - if (tcp_syn_cache_count == 0)
> + if (tcp_syn_cache_count == 0) {
>   arc4random_buf(tcp_syn_hash, sizeof(tcp_syn_hash));
> + tcpstat.tcps_sc_seedrandom++;
> + }
>  
>   SYN_HASHALL(sc->sc_hash, &sc->sc_src.sa, &sc->sc_dst.sa);
>   sc->sc_bucketidx = sc->sc_hash % tcp_syn_cache_size;
> Index: sys/netinet/tcp_var.h
> ===
> RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_var.h,v
> retrieving revision 1.109
> diff -u -p -r1.109 tcp_var.h
> --- sys/netinet/tcp_var.h 27 Aug 2015 20:56:16 -  1.109
> +++ sys/netinet/tcp_var.h 19 Mar 2016 20:53:39 -
> @@ -440,6 +440,7 @@ structtcpstat {
>   u_int64_t tcps_sc_dropped;  /* # of SYNs dropped (no route/mem) */
>   u_int64_t tcps_sc_collisions;   /* # of hash collisions */
>   u_int64_t tcps_sc_retransmitted;/* # of retransmissions */
> + u_int64_t tcps_sc_seedrandom;   /* # of syn cache seeds with random */
>  
>   u_int64_t tcps_conndrained; /* # of connections drained */
>  
> Index: usr.bin/netstat/inet.c
> ===
> RCS file: /data/mirror/openbsd/cvs/src/usr.bin/netstat/inet.c,v
> retrieving revision 1.144
> diff -u -p -r1.144 inet.c
> --- usr.bin/netstat/inet.c20 Aug 2015 22:32:41 -  1.144
> +++ usr.bin/netstat/inet.c20 Mar 2016 18:25:55 -
> @@ -455,6 +455,7 @@ tcp_stats(char *name)
>   p(tcps_sc_dupesyn, "\t%qd duplicate SYN%s received for entries "
>   "already in the cache\n");
>   p(tcps_sc_dropped, "\t%qd SYN%s dropped (no route or no space)\n");
> + p(tcps_sc_seedrandom, "\t%qd SYN cache seed%s with new random\n");
>  
>   p(tcps_sack_recovery_episode, "\t%qd SACK recovery episode%s\n");
>   p(tcps_sack_rexmits,
> 

-- 
:wq Claudio



Re: tcp syn cache random reseed

2016-03-21 Thread Alexander Bluhm
On Mon, Mar 21, 2016 at 08:25:59PM +1000, David Gwynne wrote:
> how can i judge if this is better than just using a single hash with a strong 
> function?

The attack I see is that you can measure the bucket distribution
by timing the SYN+ACK response.  You can collect samples that end
in the same bucket.  After you have collected enough, start your
DoS attack.  I think that just collecting data is also possible
with a strong hash function.  With a weak function you may collect
less and can start guessing early on top of that.  But reseeding
after a number of packets prevents collecting information over a
long period.

Unfortunately I have no analysis or practical experience with timing
attacks.  It is just a conclusion from reading the code.

bluhm



Re: tcp syn cache random reseed

2016-03-21 Thread David Gwynne

> On 21 Mar 2016, at 4:28 AM, Alexander Bluhm  wrote:
> 
> On Sat, Mar 19, 2016 at 10:41:06PM +0100, Alexander Bluhm wrote:
>> Perhaps the tcps_sc_seedrandom counter with a netstat -s line should
>> be committed anyway to show the problem.
> 
> ok?

how can i judge if this is better than just using a single hash with a strong 
function?


Re: tcp syn cache random reseed

2016-03-21 Thread Martin Pieuchot
On 20/03/16(Sun) 19:19, Alexander Bluhm wrote:
> On Sat, Mar 19, 2016 at 10:41:06PM +0100, Alexander Bluhm wrote:
> > The drawback is that the cache lookup has to be done in two syn
> > caches when an ACK arrives.
> 
> This can be prevented most of the time.  Switch the cache only after
> 10 uses.  So most of the time the passive cache is empty and
> then no lookup is done.

I like it.  Do you think it could be useful to export the value of the
current active cache set and/or the value of ``tcp_syn_use_limit''?

Anyway, your diff is ok.

> Index: netinet/tcp_input.c
> ===
> RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
> retrieving revision 1.314
> diff -u -p -r1.314 tcp_input.c
> --- netinet/tcp_input.c   7 Mar 2016 18:44:00 -   1.314
> +++ netinet/tcp_input.c   20 Mar 2016 17:47:08 -
> @@ -3260,40 +3260,46 @@ tcp_mss_adv(struct mbuf *m, int af)
>  int  tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
>  int  tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
>  int  tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
> -int  tcp_syn_cache_count;
> -struct   syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
> -u_int32_t tcp_syn_hash[5];
> -
> -#define SYN_HASH(sa, sp, dp) \
> - (((sa)->s_addr ^ tcp_syn_hash[0]) * \
> - (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ tcp_syn_hash[4]))
> +int  tcp_syn_use_limit = 10;
> +
> +struct syn_cache_set {
> +struct   syn_cache_head 
> scs_buckethead[TCP_SYN_HASH_SIZE];
> +int  scs_count;
> +int  scs_use;
> +u_int32_tscs_random[5];
> +} tcp_syn_cache[2];
> +int tcp_syn_cache_active;
> +
> +#define SYN_HASH(sa, sp, dp, rand) \
> + (((sa)->s_addr ^ (rand)[0]) *   \
> + (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
>  #ifndef INET6
> -#define  SYN_HASHALL(hash, src, dst) \
> +#define  SYN_HASHALL(hash, src, dst, rand) \
>  do { \
>   hash = SYN_HASH(&satosin(src)->sin_addr,\
>   satosin(src)->sin_port, \
> - satosin(dst)->sin_port);\
> + satosin(dst)->sin_port, (rand));\
>  } while (/*CONSTCOND*/ 0)
>  #else
> -#define SYN_HASH6(sa, sp, dp) \
> - (((sa)->s6_addr32[0] ^ tcp_syn_hash[0]) *   \
> - ((sa)->s6_addr32[1] ^ tcp_syn_hash[1]) *\
> - ((sa)->s6_addr32[2] ^ tcp_syn_hash[2]) *\
> - ((sa)->s6_addr32[3] ^ tcp_syn_hash[3]) *\
> - (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ tcp_syn_hash[4]))
> +#define SYN_HASH6(sa, sp, dp, rand) \
> + (((sa)->s6_addr32[0] ^ (rand)[0]) * \
> + ((sa)->s6_addr32[1] ^ (rand)[1]) *  \
> + ((sa)->s6_addr32[2] ^ (rand)[2]) *  \
> + ((sa)->s6_addr32[3] ^ (rand)[3]) *  \
> + (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
>  
> -#define SYN_HASHALL(hash, src, dst) \
> +#define SYN_HASHALL(hash, src, dst, rand) \
>  do { \
>   switch ((src)->sa_family) { \
>   case AF_INET:   \
>   hash = SYN_HASH(&satosin(src)->sin_addr,\
>   satosin(src)->sin_port, \
> - satosin(dst)->sin_port);\
> + satosin(dst)->sin_port, (rand));\
>   break;  \
>   case AF_INET6:  \
>   hash = SYN_HASH6(&satosin6(src)->sin6_addr, \
>   satosin6(src)->sin6_port,   \
> - satosin6(dst)->sin6_port);  \
> + satosin6(dst)->sin6_port, (rand));  \
>   break;  \
>   default:\
>   hash = 0;   \
> @@ -3305,13 +3311,12 @@ void
>  syn_cache_rm(struct syn_cache *sc)
>  {
>   sc->sc_flags |= SCF_DEAD;
> - TAILQ_REMOVE(&tcp_syn_cache[sc->sc_bucketidx].sch_bucket,
> - sc, sc_bucketq);
> + TAILQ_REMOVE(&sc->sc_buckethead->sch_bucket, sc, sc_bucketq);
>   sc->sc_tp = NULL;
>   LIST_REMOVE(sc, sc_tpq);
> - tcp_syn_cache[sc->sc_bucketidx].sch_length--;
> + sc->sc_buckethead->sch_length--;
>   timeout_del(&sc->sc_timer);
> - tcp_syn_

Re: tcp syn cache random reseed

2016-03-20 Thread Alexander Bluhm
On Sat, Mar 19, 2016 at 10:41:06PM +0100, Alexander Bluhm wrote:
> Perhaps the tcps_sc_seedrandom counter with a netstat -s line should
> be committed anyway to show the problem.

ok?

bluhm

Index: sys/netinet/tcp_input.c
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
retrieving revision 1.314
diff -u -p -r1.314 tcp_input.c
--- sys/netinet/tcp_input.c 7 Mar 2016 18:44:00 -   1.314
+++ sys/netinet/tcp_input.c 19 Mar 2016 20:09:25 -
@@ -3371,8 +3371,10 @@ syn_cache_insert(struct syn_cache *sc, s
 * If there are no entries in the hash table, reinitialize
 * the hash secrets.
 */
-   if (tcp_syn_cache_count == 0)
+   if (tcp_syn_cache_count == 0) {
arc4random_buf(tcp_syn_hash, sizeof(tcp_syn_hash));
+   tcpstat.tcps_sc_seedrandom++;
+   }
 
SYN_HASHALL(sc->sc_hash, &sc->sc_src.sa, &sc->sc_dst.sa);
sc->sc_bucketidx = sc->sc_hash % tcp_syn_cache_size;
Index: sys/netinet/tcp_var.h
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_var.h,v
retrieving revision 1.109
diff -u -p -r1.109 tcp_var.h
--- sys/netinet/tcp_var.h   27 Aug 2015 20:56:16 -  1.109
+++ sys/netinet/tcp_var.h   19 Mar 2016 20:53:39 -
@@ -440,6 +440,7 @@ struct  tcpstat {
u_int64_t tcps_sc_dropped;  /* # of SYNs dropped (no route/mem) */
u_int64_t tcps_sc_collisions;   /* # of hash collisions */
u_int64_t tcps_sc_retransmitted;/* # of retransmissions */
+   u_int64_t tcps_sc_seedrandom;   /* # of syn cache seeds with random */
 
u_int64_t tcps_conndrained; /* # of connections drained */
 
Index: usr.bin/netstat/inet.c
===
RCS file: /data/mirror/openbsd/cvs/src/usr.bin/netstat/inet.c,v
retrieving revision 1.144
diff -u -p -r1.144 inet.c
--- usr.bin/netstat/inet.c  20 Aug 2015 22:32:41 -  1.144
+++ usr.bin/netstat/inet.c  20 Mar 2016 18:25:55 -
@@ -455,6 +455,7 @@ tcp_stats(char *name)
p(tcps_sc_dupesyn, "\t%qd duplicate SYN%s received for entries "
"already in the cache\n");
p(tcps_sc_dropped, "\t%qd SYN%s dropped (no route or no space)\n");
+   p(tcps_sc_seedrandom, "\t%qd SYN cache seed%s with new random\n");
 
p(tcps_sack_recovery_episode, "\t%qd SACK recovery episode%s\n");
p(tcps_sack_rexmits,



Re: tcp syn cache random reseed

2016-03-20 Thread Alexander Bluhm
On Sat, Mar 19, 2016 at 10:41:06PM +0100, Alexander Bluhm wrote:
> The drawback is that the cache lookup has to be done in two syn
> caches when an ACK arrives.

This can be prevented most of the time.  Switch the cache only after
10 uses.  So most of the time the passive cache is empty and
then no lookup is done.

bluhm

Index: netinet/tcp_input.c
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
retrieving revision 1.314
diff -u -p -r1.314 tcp_input.c
--- netinet/tcp_input.c 7 Mar 2016 18:44:00 -   1.314
+++ netinet/tcp_input.c 20 Mar 2016 17:47:08 -
@@ -3260,40 +3260,46 @@ tcp_mss_adv(struct mbuf *m, int af)
 inttcp_syn_cache_size = TCP_SYN_HASH_SIZE;
 inttcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
 inttcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
-inttcp_syn_cache_count;
-struct syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
-u_int32_t tcp_syn_hash[5];
-
-#define SYN_HASH(sa, sp, dp) \
-   (((sa)->s_addr ^ tcp_syn_hash[0]) * \
-   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ tcp_syn_hash[4]))
+inttcp_syn_use_limit = 10;
+
+struct syn_cache_set {
+struct syn_cache_head scs_buckethead[TCP_SYN_HASH_SIZE];
+intscs_count;
+intscs_use;
+u_int32_t  scs_random[5];
+} tcp_syn_cache[2];
+int tcp_syn_cache_active;
+
+#define SYN_HASH(sa, sp, dp, rand) \
+   (((sa)->s_addr ^ (rand)[0]) *   \
+   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
 #ifndef INET6
-#defineSYN_HASHALL(hash, src, dst) \
+#defineSYN_HASHALL(hash, src, dst, rand) \
 do {   \
hash = SYN_HASH(&satosin(src)->sin_addr,\
satosin(src)->sin_port, \
-   satosin(dst)->sin_port);\
+   satosin(dst)->sin_port, (rand));\
 } while (/*CONSTCOND*/ 0)
 #else
-#define SYN_HASH6(sa, sp, dp) \
-   (((sa)->s6_addr32[0] ^ tcp_syn_hash[0]) *   \
-   ((sa)->s6_addr32[1] ^ tcp_syn_hash[1]) *\
-   ((sa)->s6_addr32[2] ^ tcp_syn_hash[2]) *\
-   ((sa)->s6_addr32[3] ^ tcp_syn_hash[3]) *\
-   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ tcp_syn_hash[4]))
+#define SYN_HASH6(sa, sp, dp, rand) \
+   (((sa)->s6_addr32[0] ^ (rand)[0]) * \
+   ((sa)->s6_addr32[1] ^ (rand)[1]) *  \
+   ((sa)->s6_addr32[2] ^ (rand)[2]) *  \
+   ((sa)->s6_addr32[3] ^ (rand)[3]) *  \
+   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
 
-#define SYN_HASHALL(hash, src, dst) \
+#define SYN_HASHALL(hash, src, dst, rand) \
 do {   \
switch ((src)->sa_family) { \
case AF_INET:   \
hash = SYN_HASH(&satosin(src)->sin_addr,\
satosin(src)->sin_port, \
-   satosin(dst)->sin_port);\
+   satosin(dst)->sin_port, (rand));\
break;  \
case AF_INET6:  \
hash = SYN_HASH6(&satosin6(src)->sin6_addr, \
satosin6(src)->sin6_port,   \
-   satosin6(dst)->sin6_port);  \
+   satosin6(dst)->sin6_port, (rand));  \
break;  \
default:\
hash = 0;   \
@@ -3305,13 +3311,12 @@ void
 syn_cache_rm(struct syn_cache *sc)
 {
sc->sc_flags |= SCF_DEAD;
-   TAILQ_REMOVE(&tcp_syn_cache[sc->sc_bucketidx].sch_bucket,
-   sc, sc_bucketq);
+   TAILQ_REMOVE(&sc->sc_buckethead->sch_bucket, sc, sc_bucketq);
sc->sc_tp = NULL;
LIST_REMOVE(sc, sc_tpq);
-   tcp_syn_cache[sc->sc_bucketidx].sch_length--;
+   sc->sc_buckethead->sch_length--;
timeout_del(&sc->sc_timer);
-   tcp_syn_cache_count--;
+   sc->sc_set->scs_count--;
 }
 
 void
@@ -3351,8 +3356,10 @@ syn_cache_init(void)
int i;
 
/* Initialize the hash buckets. */
-   for (i = 0; i < tcp_syn_cache_size; i++)
-   TAILQ_INIT(&tcp_syn_cache[i].sch_bucket);
+   for (i = 0; i < tcp_syn_cache

tcp syn cache random reseed

2016-03-19 Thread Alexander Bluhm
Hi,

To prevent attacks on the hash buckets of the syn cache, our TCP
stack reseeds the hash function every time the cache is empty.
Unfortunately the attacker can prevent the reseeding by sending
unanswered SYN packets periodically.

I fix this by having an active syn cache that gets new entries and
a passive one that is idling out.  When the passive one is empty
it becomes active with a new random hash seed.

The drawback is that the cache lookup has to be done in two syn
caches when an ACK arrives.

Perhaps the tcps_sc_seedrandom counter with a netstat -s line should
be committed anyway to show the problem.

I think this reseeding fix is independent from a decision if the
change the hash algorithm to sip hash.  I also have a diff for that.

comments?

bluhm

Index: netinet/tcp_input.c
===
RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
retrieving revision 1.314
diff -u -p -r1.314 tcp_input.c
--- netinet/tcp_input.c 7 Mar 2016 18:44:00 -   1.314
+++ netinet/tcp_input.c 19 Mar 2016 20:58:34 -
@@ -3260,40 +3260,44 @@ tcp_mss_adv(struct mbuf *m, int af)
 inttcp_syn_cache_size = TCP_SYN_HASH_SIZE;
 inttcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
 inttcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
-inttcp_syn_cache_count;
-struct syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
-u_int32_t tcp_syn_hash[5];
-
-#define SYN_HASH(sa, sp, dp) \
-   (((sa)->s_addr ^ tcp_syn_hash[0]) * \
-   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ tcp_syn_hash[4]))
+
+struct syn_cache_set {
+struct syn_cache_head scs_buckethead[TCP_SYN_HASH_SIZE];
+intscs_count;
+u_int32_t  scs_random[5];
+} tcp_syn_cache[2];
+int tcp_syn_cache_active;
+
+#define SYN_HASH(sa, sp, dp, rand) \
+   (((sa)->s_addr ^ (rand)[0]) *   \
+   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
 #ifndef INET6
-#defineSYN_HASHALL(hash, src, dst) \
+#defineSYN_HASHALL(hash, src, dst, rand) \
 do {   \
hash = SYN_HASH(&satosin(src)->sin_addr,\
satosin(src)->sin_port, \
-   satosin(dst)->sin_port);\
+   satosin(dst)->sin_port, (rand));\
 } while (/*CONSTCOND*/ 0)
 #else
-#define SYN_HASH6(sa, sp, dp) \
-   (((sa)->s6_addr32[0] ^ tcp_syn_hash[0]) *   \
-   ((sa)->s6_addr32[1] ^ tcp_syn_hash[1]) *\
-   ((sa)->s6_addr32[2] ^ tcp_syn_hash[2]) *\
-   ((sa)->s6_addr32[3] ^ tcp_syn_hash[3]) *\
-   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ tcp_syn_hash[4]))
+#define SYN_HASH6(sa, sp, dp, rand) \
+   (((sa)->s6_addr32[0] ^ (rand)[0]) * \
+   ((sa)->s6_addr32[1] ^ (rand)[1]) *  \
+   ((sa)->s6_addr32[2] ^ (rand)[2]) *  \
+   ((sa)->s6_addr32[3] ^ (rand)[3]) *  \
+   (u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
 
-#define SYN_HASHALL(hash, src, dst) \
+#define SYN_HASHALL(hash, src, dst, rand) \
 do {   \
switch ((src)->sa_family) { \
case AF_INET:   \
hash = SYN_HASH(&satosin(src)->sin_addr,\
satosin(src)->sin_port, \
-   satosin(dst)->sin_port);\
+   satosin(dst)->sin_port, (rand));\
break;  \
case AF_INET6:  \
hash = SYN_HASH6(&satosin6(src)->sin6_addr, \
satosin6(src)->sin6_port,   \
-   satosin6(dst)->sin6_port);  \
+   satosin6(dst)->sin6_port, (rand));  \
break;  \
default:\
hash = 0;   \
@@ -3305,13 +3309,12 @@ void
 syn_cache_rm(struct syn_cache *sc)
 {
sc->sc_flags |= SCF_DEAD;
-   TAILQ_REMOVE(&tcp_syn_cache[sc->sc_bucketidx].sch_bucket,
-   sc, sc_bucketq);
+   TAILQ_REMOVE(&sc->sc_buckethead->sch_bucket, sc, sc_bucketq);
sc->sc_tp = NULL;
LIST_REMOVE(sc, sc_tpq);
-   tcp_syn_cache[sc->sc_bucketidx].sch_length--;
+   sc->sc_buckethead->s