Added callbacks to the BPF SOCKET_OPS program type that run before an
active connection is initialized and after a passive or active
connection is established.

The following patch demonstrates how they can be used to set send and
receive buffer sizes.
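
As an illustration (not part of this patch), a minimal sketch of such
a SOCKET_OPS program that sets the buffer sizes from the new
callbacks; the struct bpf_socket_ops context, its op and reply
fields, and the bpf_setsockopt() helper are assumed from elsewhere in
this series, not defined here:

/* Minimal sketch, assuming the bpf_socket_ops context and the
 * bpf_setsockopt() helper introduced elsewhere in this series.
 */
#include <uapi/linux/bpf.h>
#include <linux/socket.h>
#include "bpf_helpers.h"

SEC("sockops")
int bpf_bufs(struct bpf_socket_ops *skops)
{
        int bufsize = 150000;   /* example value only */
        int rv = 0;

        switch (skops->op) {
        case BPF_SOCKET_OPS_TCP_CONNECT_CB:
        case BPF_SOCKET_OPS_ACTIVE_ESTABLISHED_CB:
        case BPF_SOCKET_OPS_PASSIVE_ESTABLISHED_CB:
                /* Set both buffer sizes at each new callback */
                rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF,
                                    &bufsize, sizeof(bufsize));
                rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
                                     &bufsize, sizeof(bufsize));
                break;
        default:
                rv = -1;
        }
        skops->reply = rv;
        return 1;
}

char _license[] SEC("license") = "GPL";

The reply/return-1 convention above follows the sample programs
elsewhere in the series and may differ at this point in its history.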

Signed-off-by: Lawrence Brakmo <bra...@fb.com>
---
 include/uapi/linux/bpf.h | 11 +++++++++++
 net/ipv4/tcp_fastopen.c  |  1 +
 net/ipv4/tcp_input.c     |  4 +++-
 net/ipv4/tcp_output.c    |  1 +
 4 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8accb4d..c3490d3 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -765,6 +765,17 @@ enum {
                                         * window (in packets) or -1 if default
                                         * value should be used
                                         */
+       BPF_SOCKET_OPS_TCP_CONNECT_CB,  /* Calls BPF program right before an
+                                        * active connection is initialized
+                                        */
+       BPF_SOCKET_OPS_ACTIVE_ESTABLISHED_CB,   /* Calls BPF program when an
+                                                * active connection is
+                                                * established
+                                                */
+       BPF_SOCKET_OPS_PASSIVE_ESTABLISHED_CB,  /* Calls BPF program when a
+                                                * passive connection is
+                                                * established
+                                                */
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 4af82b9..c3ec4ec 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -221,6 +221,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
        tcp_init_metrics(child);
+       tcp_call_bpf(child, false, BPF_SOCKET_OPS_PASSIVE_ESTABLISHED_CB);
        tcp_init_buffer_space(child);
 
        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0867b05..e0d688a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5571,7 +5571,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
        icsk->icsk_af_ops->rebuild_header(sk);
 
        tcp_init_metrics(sk);
-
+       tcp_call_bpf(sk, false, BPF_SOCKET_OPS_ACTIVE_ESTABLISHED_CB);
        tcp_init_congestion_control(sk);
 
        /* Prevent spurious tcp_cwnd_restart() on first data
@@ -5977,6 +5977,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                } else {
                        /* Make sure socket is routed, for correct metrics. */
                        icsk->icsk_af_ops->rebuild_header(sk);
+                       tcp_call_bpf(sk, false,
+                                    BPF_SOCKET_OPS_PASSIVE_ESTABLISHED_CB);
                        tcp_init_congestion_control(sk);
 
                        tcp_mtup_init(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e5f623f..9124d3d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3445,6 +3445,7 @@ int tcp_connect(struct sock *sk)
        struct sk_buff *buff;
        int err;
 
+       tcp_call_bpf(sk, false, BPF_SOCKET_OPS_TCP_CONNECT_CB);
        tcp_connect_init(sk);
 
        if (unlikely(tp->repair)) {
-- 
2.9.3
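
For completeness, a sketch of how such a program might be loaded and
attached to a cgroup-v2 directory from userspace, so the callbacks
fire for sockets in that cgroup; the BPF_CGROUP_SOCKET_OPS attach
type, the object file name, and the load_bpf_file()/prog_fd[] helpers
from samples/bpf are assumptions here:

/* Hypothetical loader, modeled on samples/bpf. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "libbpf.h"     /* assumed: bpf_prog_attach(), bpf_log_buf */
#include "bpf_load.h"   /* assumed: load_bpf_file(), prog_fd[] */

int main(void)
{
        int cg_fd, rv;

        cg_fd = open("/sys/fs/cgroup/test", O_DIRECTORY | O_RDONLY);
        if (cg_fd < 0) {
                perror("open cgroup");
                return 1;
        }
        if (load_bpf_file("tcp_bufs_kern.o")) { /* hypothetical .o */
                printf("%s", bpf_log_buf);
                return 1;
        }
        /* Attach type name assumed to match this series' naming */
        rv = bpf_prog_attach(prog_fd[0], cg_fd, BPF_CGROUP_SOCKET_OPS, 0);
        if (rv)
                perror("bpf_prog_attach");
        close(cg_fd);
        return rv ? 1 : 0;
}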
