With upstream kernel commit 798b2cbf9227b1bd7d37ae9af4d9c750e6f4de9c
CONFIG_CRYPTO and CONFIG_CRYPTO_AES are mandatory when CONFIG_INET is
selected.

Some targets could save space if those CRYPTO_* options were not
selected. On ar71xx that would be ~20k off the kernel binary. The downside
is that the entire image will be bigger if the CRYPTO_* options are built
as modules.

So the real question is: do we want to remove TCP Fast Open support so
that some targets can benefit from a ~20k kernel size reduction?

Below, generic/config-3.10 is also patched, but don't take that into
consideration.

Luka


 target/linux/generic/config-3.10                   |  10 +-
 .../690-openwrt-disable-tcp_fastopen.patch         | 397 +++++++++++++++++++++
 2 files changed, 402 insertions(+), 5 deletions(-)
 create mode 100644 
target/linux/generic/patches-3.10/690-openwrt-disable-tcp_fastopen.patch

diff --git a/target/linux/generic/config-3.10 b/target/linux/generic/config-3.10
index 943c217..fe79fec 100644
--- a/target/linux/generic/config-3.10
+++ b/target/linux/generic/config-3.10
@@ -573,14 +573,14 @@ CONFIG_CRC32_SARWATE=y
 # CONFIG_CRC_T10DIF is not set
 CONFIG_CROSS_COMPILE=""
 # CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_CRYPTO=y
+# CONFIG_CRYPTO is not set
 # CONFIG_CRYPTO_AEAD is not set
-CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES is not set
 # CONFIG_CRYPTO_AES_586 is not set
 # CONFIG_CRYPTO_AES_ARM is not set
 # CONFIG_CRYPTO_AES_NI_INTEL is not set
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ALGAPI2=y
+# CONFIG_CRYPTO_ALGAPI is not set
+# CONFIG_CRYPTO_ALGAPI2 is not set
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_ANUBIS is not set
 # CONFIG_CRYPTO_ARC4 is not set
@@ -620,7 +620,7 @@ CONFIG_CRYPTO_ALGAPI2=y
 # CONFIG_CRYPTO_LZO is not set
 # CONFIG_CRYPTO_MANAGER is not set
 # CONFIG_CRYPTO_MANAGER2 is not set
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 # CONFIG_CRYPTO_MD4 is not set
 # CONFIG_CRYPTO_MD5 is not set
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git 
a/target/linux/generic/patches-3.10/690-openwrt-disable-tcp_fastopen.patch 
b/target/linux/generic/patches-3.10/690-openwrt-disable-tcp_fastopen.patch
new file mode 100644
index 0000000..d94fd95
--- /dev/null
+++ b/target/linux/generic/patches-3.10/690-openwrt-disable-tcp_fastopen.patch
@@ -0,0 +1,397 @@
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -52,8 +52,6 @@ source "net/iucv/Kconfig"
+ 
+ config INET
+       bool "TCP/IP networking"
+-      select CRYPTO
+-      select CRYPTO_AES
+       ---help---
+         These are the protocols used on the Internet and on most local
+         Ethernets. It is highly recommended to say Y here (this will enlarge
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -233,47 +233,6 @@ static int ipv4_tcp_mem(ctl_table *ctl,
+       return 0;
+ }
+ 
+-static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user 
*buffer,
+-                               size_t *lenp, loff_t *ppos)
+-{
+-      ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+-      struct tcp_fastopen_context *ctxt;
+-      int ret;
+-      u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+-
+-      tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+-      if (!tbl.data)
+-              return -ENOMEM;
+-
+-      rcu_read_lock();
+-      ctxt = rcu_dereference(tcp_fastopen_ctx);
+-      if (ctxt)
+-              memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+-      else
+-              memset(user_key, 0, sizeof(user_key));
+-      rcu_read_unlock();
+-
+-      snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
+-              user_key[0], user_key[1], user_key[2], user_key[3]);
+-      ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+-
+-      if (write && ret == 0) {
+-              if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
+-                         user_key + 2, user_key + 3) != 4) {
+-                      ret = -EINVAL;
+-                      goto bad_key;
+-              }
+-              tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
+-      }
+-
+-bad_key:
+-      pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
+-             user_key[0], user_key[1], user_key[2], user_key[3],
+-             (char *)tbl.data, ret);
+-      kfree(tbl.data);
+-      return ret;
+-}
+-
+ static struct ctl_table ipv4_table[] = {
+       {
+               .procname       = "tcp_timestamps",
+@@ -421,19 +380,6 @@ static struct ctl_table ipv4_table[] = {
+       },
+ #endif
+       {
+-              .procname       = "tcp_fastopen",
+-              .data           = &sysctl_tcp_fastopen,
+-              .maxlen         = sizeof(int),
+-              .mode           = 0644,
+-              .proc_handler   = proc_dointvec,
+-      },
+-      {
+-              .procname       = "tcp_fastopen_key",
+-              .mode           = 0600,
+-              .maxlen         = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
+-              .proc_handler   = proc_tcp_fastopen_key,
+-      },
+-      {
+               .procname       = "tcp_tw_recycle",
+               .data           = &tcp_death_row.sysctl_tw_recycle,
+               .maxlen         = sizeof(int),
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -1,91 +1,10 @@
+-#include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/list.h>
+-#include <linux/tcp.h>
+-#include <linux/rcupdate.h>
+-#include <linux/rculist.h>
+-#include <net/inetpeer.h>
+-#include <net/tcp.h>
+ 
+-int sysctl_tcp_fastopen __read_mostly;
+-
+-struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+-
+-static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
+-
+-static void tcp_fastopen_ctx_free(struct rcu_head *head)
+-{
+-      struct tcp_fastopen_context *ctx =
+-          container_of(head, struct tcp_fastopen_context, rcu);
+-      crypto_free_cipher(ctx->tfm);
+-      kfree(ctx);
+-}
+-
+-int tcp_fastopen_reset_cipher(void *key, unsigned int len)
+-{
+-      int err;
+-      struct tcp_fastopen_context *ctx, *octx;
+-
+-      ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+-      if (!ctx)
+-              return -ENOMEM;
+-      ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+-
+-      if (IS_ERR(ctx->tfm)) {
+-              err = PTR_ERR(ctx->tfm);
+-error:                kfree(ctx);
+-              pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
+-              return err;
+-      }
+-      err = crypto_cipher_setkey(ctx->tfm, key, len);
+-      if (err) {
+-              pr_err("TCP: TFO cipher key error: %d\n", err);
+-              crypto_free_cipher(ctx->tfm);
+-              goto error;
+-      }
+-      memcpy(ctx->key, key, len);
+-
+-      spin_lock(&tcp_fastopen_ctx_lock);
+-
+-      octx = rcu_dereference_protected(tcp_fastopen_ctx,
+-                              lockdep_is_held(&tcp_fastopen_ctx_lock));
+-      rcu_assign_pointer(tcp_fastopen_ctx, ctx);
+-      spin_unlock(&tcp_fastopen_ctx_lock);
+-
+-      if (octx)
+-              call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
+-      return err;
+-}
+-
+-/* Computes the fastopen cookie for the peer.
+- * The peer address is a 128 bits long (pad with zeros for IPv4).
+- *
+- * The caller must check foc->len to determine if a valid cookie
+- * has been generated successfully.
+-*/
+-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+-{
+-      __be32 peer_addr[4] = { addr, 0, 0, 0 };
+-      struct tcp_fastopen_context *ctx;
+-
+-      rcu_read_lock();
+-      ctx = rcu_dereference(tcp_fastopen_ctx);
+-      if (ctx) {
+-              crypto_cipher_encrypt_one(ctx->tfm,
+-                                        foc->val,
+-                                        (__u8 *)peer_addr);
+-              foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+-      }
+-      rcu_read_unlock();
+-}
++int sysctl_tcp_fastopen __read_mostly = 0;
+ 
+ static int __init tcp_fastopen_init(void)
+ {
+-      __u8 key[TCP_FASTOPEN_KEY_LENGTH];
+-
+-      get_random_bytes(key, sizeof(key));
+-      tcp_fastopen_reset_cipher(key, sizeof(key));
+       return 0;
+ }
+ 
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1285,183 +1285,6 @@
+ };
+ #endif
+ 
+-static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
+-                             struct request_sock *req,
+-                             struct tcp_fastopen_cookie *foc,
+-                             struct tcp_fastopen_cookie *valid_foc)
+-{
+-      bool skip_cookie = false;
+-      struct fastopen_queue *fastopenq;
+-
+-      if (likely(!fastopen_cookie_present(foc))) {
+-              /* See include/net/tcp.h for the meaning of these knobs */
+-              if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
+-                  ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
+-                  (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
+-                      skip_cookie = true; /* no cookie to validate */
+-              else
+-                      return false;
+-      }
+-      fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
+-      /* A FO option is present; bump the counter. */
+-      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
+-
+-      /* Make sure the listener has enabled fastopen, and we don't
+-       * exceed the max # of pending TFO requests allowed before trying
+-       * to validating the cookie in order to avoid burning CPU cycles
+-       * unnecessarily.
+-       *
+-       * XXX (TFO) - The implication of checking the max_qlen before
+-       * processing a cookie request is that clients can't differentiate
+-       * between qlen overflow causing Fast Open to be disabled
+-       * temporarily vs a server not supporting Fast Open at all.
+-       */
+-      if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
+-          fastopenq == NULL || fastopenq->max_qlen == 0)
+-              return false;
+-
+-      if (fastopenq->qlen >= fastopenq->max_qlen) {
+-              struct request_sock *req1;
+-              spin_lock(&fastopenq->lock);
+-              req1 = fastopenq->rskq_rst_head;
+-              if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+-                      spin_unlock(&fastopenq->lock);
+-                      NET_INC_STATS_BH(sock_net(sk),
+-                          LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+-                      /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
+-                      foc->len = -1;
+-                      return false;
+-              }
+-              fastopenq->rskq_rst_head = req1->dl_next;
+-              fastopenq->qlen--;
+-              spin_unlock(&fastopenq->lock);
+-              reqsk_free(req1);
+-      }
+-      if (skip_cookie) {
+-              tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+-              return true;
+-      }
+-      if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
+-              if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
+-                      tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+-                      if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
+-                          memcmp(&foc->val[0], &valid_foc->val[0],
+-                          TCP_FASTOPEN_COOKIE_SIZE) != 0)
+-                              return false;
+-                      valid_foc->len = -1;
+-              }
+-              /* Acknowledge the data received from the peer. */
+-              tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+-              return true;
+-      } else if (foc->len == 0) { /* Client requesting a cookie */
+-              tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+-              NET_INC_STATS_BH(sock_net(sk),
+-                  LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+-      } else {
+-              /* Client sent a cookie with wrong size. Treat it
+-               * the same as invalid and return a valid one.
+-               */
+-              tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+-      }
+-      return false;
+-}
+-
+-static int tcp_v4_conn_req_fastopen(struct sock *sk,
+-                                  struct sk_buff *skb,
+-                                  struct sk_buff *skb_synack,
+-                                  struct request_sock *req)
+-{
+-      struct tcp_sock *tp = tcp_sk(sk);
+-      struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+-      const struct inet_request_sock *ireq = inet_rsk(req);
+-      struct sock *child;
+-      int err;
+-
+-      req->num_retrans = 0;
+-      req->num_timeout = 0;
+-      req->sk = NULL;
+-
+-      child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+-      if (child == NULL) {
+-              NET_INC_STATS_BH(sock_net(sk),
+-                               LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+-              kfree_skb(skb_synack);
+-              return -1;
+-      }
+-      err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+-                                  ireq->rmt_addr, ireq->opt);
+-      err = net_xmit_eval(err);
+-      if (!err)
+-              tcp_rsk(req)->snt_synack = tcp_time_stamp;
+-      /* XXX (TFO) - is it ok to ignore error and continue? */
+-
+-      spin_lock(&queue->fastopenq->lock);
+-      queue->fastopenq->qlen++;
+-      spin_unlock(&queue->fastopenq->lock);
+-
+-      /* Initialize the child socket. Have to fix some values to take
+-       * into account the child is a Fast Open socket and is created
+-       * only out of the bits carried in the SYN packet.
+-       */
+-      tp = tcp_sk(child);
+-
+-      tp->fastopen_rsk = req;
+-      /* Do a hold on the listner sk so that if the listener is being
+-       * closed, the child that has been accepted can live on and still
+-       * access listen_lock.
+-       */
+-      sock_hold(sk);
+-      tcp_rsk(req)->listener = sk;
+-
+-      /* RFC1323: The window in SYN & SYN/ACK segments is never
+-       * scaled. So correct it appropriately.
+-       */
+-      tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+-
+-      /* Activate the retrans timer so that SYNACK can be retransmitted.
+-       * The request socket is not added to the SYN table of the parent
+-       * because it's been added to the accept queue directly.
+-       */
+-      inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
+-          TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+-
+-      /* Add the child socket directly into the accept queue */
+-      inet_csk_reqsk_queue_add(sk, req, child);
+-
+-      /* Now finish processing the fastopen child socket. */
+-      inet_csk(child)->icsk_af_ops->rebuild_header(child);
+-      tcp_init_congestion_control(child);
+-      tcp_mtup_init(child);
+-      tcp_init_buffer_space(child);
+-      tcp_init_metrics(child);
+-
+-      /* Queue the data carried in the SYN packet. We need to first
+-       * bump skb's refcnt because the caller will attempt to free it.
+-       *
+-       * XXX (TFO) - we honor a zero-payload TFO request for now.
+-       * (Any reason not to?)
+-       */
+-      if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
+-              /* Don't queue the skb if there is no payload in SYN.
+-               * XXX (TFO) - How about SYN+FIN?
+-               */
+-              tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+-      } else {
+-              skb = skb_get(skb);
+-              skb_dst_drop(skb);
+-              __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+-              skb_set_owner_r(skb, child);
+-              __skb_queue_tail(&child->sk_receive_queue, skb);
+-              tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+-              tp->syn_data_acked = 1;
+-      }
+-      sk->sk_data_ready(sk, 0);
+-      bh_unlock_sock(child);
+-      sock_put(child);
+-      WARN_ON(req->sk == NULL);
+-      return 0;
+-}
+-
+ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+       struct tcp_options_received tmp_opt;
+@@ -1477,7 +1300,6 @@
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+       struct sk_buff *skb_synack;
+-      int do_fastopen;
+ 
+       /* Never answer to SYNs send to broadcast or multicast */
+       if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -1582,7 +1404,6 @@
+               if (dst == NULL)
+                       goto drop_and_free;
+       }
+-      do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
+ 
+       /* We don't call tcp_v4_send_synack() directly because we need
+        * to make sure a child socket can be created successfully before
+@@ -1604,7 +1425,7 @@
+       } else
+               goto drop_and_free;
+ 
+-      if (likely(!do_fastopen)) {
++      if (1) {
+               int err;
+               err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+                    ireq->rmt_addr, ireq->opt);
+@@ -1619,8 +1440,7 @@
+               if (fastopen_cookie_present(&foc) && foc.len != 0)
+                       NET_INC_STATS_BH(sock_net(sk),
+                           LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+-      } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
+-              goto drop_and_free;
++      }
+ 
+       return 0;
+ 
_______________________________________________
openwrt-devel mailing list
[email protected]
https://lists.openwrt.org/cgi-bin/mailman/listinfo/openwrt-devel

Reply via email to