We observed high 99th and 99.9th percentile latencies when doing RPCs
with DCTCP. The problem is triggered when the last packet of a request
arrives CE marked. The reply will carry the ECE mark, causing TCP to
shrink its cwnd to 1 (because there are no packets in flight). When the
first packet of the next request arrives, the ACK is sometimes delayed
even though it is CWR marked, adding up to 40ms to the RPC latency.
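
For scale, a back-of-envelope sketch of the tail-latency hit (the RTT
here is a hypothetical intra-datacenter value; the 40ms figure matches
Linux's minimum delayed-ACK timeout, TCP_DELACK_MIN on an HZ=1000
kernel):

	#include <stdio.h>

	int main(void)
	{
		double rtt_us = 100.0;		/* assumed intra-DC round trip */
		double delack_us = 40 * 1000;	/* delayed-ACK timer, ~40ms */

		/* Healthy case: request and reply fit in cwnd, ~1 RTT. */
		printf("normal RPC : %8.0f us\n", rtt_us);

		/* Bug case: cwnd == 1 after CWR, so the first packet's ACK
		 * can sit on the delayed-ACK timer before the rest of the
		 * request is allowed to flow.
		 */
		printf("worst case : %8.0f us\n", rtt_us + delack_us);
		return 0;
	}

With these numbers a one-RTT exchange stretches from ~100us to ~40ms,
roughly a 400x hit, which is what shows up in the 99/99.9 percentiles.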

This patch ensures that arriving data packets with the CWR flag set are
ACKed immediately.

Modified based on comments by Neal Cardwell <ncardw...@google.com>

Signed-off-by: Lawrence Brakmo <bra...@fb.com>
---
 net/ipv4/tcp_input.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 76ca88f63b70..6fd1f2378f6c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -254,10 +254,16 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
        tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+static void __tcp_ecn_check(struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
+       /* If the sender is telling us it has entered CWR, then its cwnd may be
+        * very low (even just 1 packet), so we should ACK immediately.
+        */
+       if (tcp_hdr(skb)->cwr)
+               tcp_enter_quickack_mode(sk, 2);
+
        switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
        case INET_ECN_NOT_ECT:
                /* Funny extension: if ECT is not set on a segment,
@@ -286,10 +292,10 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
        }
 }
 
-static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+static void tcp_ecn_check(struct sock *sk, const struct sk_buff *skb)
 {
        if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
-               __tcp_ecn_check_ce(sk, skb);
+               __tcp_ecn_check(sk, skb);
 }
 
 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -715,7 +721,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
        }
        icsk->icsk_ack.lrcvtime = now;
 
-       tcp_ecn_check_ce(sk, skb);
+       tcp_ecn_check(sk, skb);
 
        if (skb->len >= 128)
                tcp_grow_window(sk, skb);
@@ -4439,7 +4445,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        u32 seq, end_seq;
        bool fragstolen;
 
-       tcp_ecn_check_ce(sk, skb);
+       tcp_ecn_check(sk, skb);
 
        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
-- 
2.17.1
