Module Name:    src
Committed By:   kefren
Date:           Tue Nov 12 09:02:05 UTC 2013

Modified Files:
        src/sys/netinet: tcp_congctl.c tcp_congctl.h tcp_input.c tcp_sack.c
            tcp_subr.c tcp_var.h

Log Message:
* implement TCP CUBIC congestion control algorithm
* move tcp_sack_newack bits inside reno and newreno_fast_retransmit_newack
* notify ECN peer about cwnd shrink in [new]reno_slow_retransmit

Based on the patch proposed on tech-net@ on Nov 7 with minor improvements:
 * adapt wmax for no-fast convergence case
 * correct cbrt calculation for big window sizes (>750KB)


To generate a diff of this commit:
cvs rdiff -u -r1.17 -r1.18 src/sys/netinet/tcp_congctl.c
cvs rdiff -u -r1.6 -r1.7 src/sys/netinet/tcp_congctl.h
cvs rdiff -u -r1.329 -r1.330 src/sys/netinet/tcp_input.c
cvs rdiff -u -r1.28 -r1.29 src/sys/netinet/tcp_sack.c
cvs rdiff -u -r1.250 -r1.251 src/sys/netinet/tcp_subr.c
cvs rdiff -u -r1.170 -r1.171 src/sys/netinet/tcp_var.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/netinet/tcp_congctl.c
diff -u src/sys/netinet/tcp_congctl.c:1.17 src/sys/netinet/tcp_congctl.c:1.18
--- src/sys/netinet/tcp_congctl.c:1.17	Fri Oct 25 16:29:20 2013
+++ src/sys/netinet/tcp_congctl.c	Tue Nov 12 09:02:05 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: tcp_congctl.c,v 1.17 2013/10/25 16:29:20 martin Exp $	*/
+/*	$NetBSD: tcp_congctl.c,v 1.18 2013/11/12 09:02:05 kefren Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1998, 1999, 2001, 2005, 2006 The NetBSD Foundation, Inc.
@@ -135,7 +135,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tcp_congctl.c,v 1.17 2013/10/25 16:29:20 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tcp_congctl.c,v 1.18 2013/11/12 09:02:05 kefren Exp $");
 
 #include "opt_inet.h"
 #include "opt_tcp_debug.h"
@@ -194,6 +194,9 @@ __KERNEL_RCSID(0, "$NetBSD: tcp_congctl.
  *   consider separating the actual implementations in another file.
  */
 
+static void tcp_common_congestion_exp(struct tcpcb *, int, int);
+
+static int  tcp_reno_do_fast_retransmit(struct tcpcb *, const struct tcphdr *);
 static int  tcp_reno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
 static void tcp_reno_slow_retransmit(struct tcpcb *);
 static void tcp_reno_fast_retransmit_newack(struct tcpcb *,
@@ -206,6 +209,10 @@ static void tcp_newreno_fast_retransmit_
 	const struct tcphdr *);
 static void tcp_newreno_newack(struct tcpcb *, const struct tcphdr *);
 
+static int tcp_cubic_fast_retransmit(struct tcpcb *, const struct tcphdr *);
+static void tcp_cubic_slow_retransmit(struct tcpcb *tp);
+static void tcp_cubic_newack(struct tcpcb *, const struct tcphdr *);
+static void tcp_cubic_congestion_exp(struct tcpcb *);
 
 static void tcp_congctl_fillnames(void);
 
@@ -241,6 +248,8 @@ tcp_congctl_init(void)
 	KASSERT(r == 0);
 	r = tcp_congctl_register("newreno", &tcp_newreno_ctl);
 	KASSERT(r == 0);
+	r = tcp_congctl_register("cubic", &tcp_cubic_ctl);
+	KASSERT(r == 0);
 
 	/* NewReno is the default. */
 #ifndef TCP_CONGCTL_DEFAULT
@@ -406,18 +415,28 @@ tcp_congctl_fillnames(void)
 /* ------------------------------------------------------------------------ */
 
 /*
- * TCP/Reno congestion control.
+ * Common stuff
  */
+
+/* Window reduction (1-beta) for [New]Reno: 0.5 */
+#define RENO_BETAA 1
+#define RENO_BETAB 2
+/* Window reduction (1-beta) for Cubic: 0.8 */
+#define CUBIC_BETAA 4
+#define CUBIC_BETAB 5
+/* Draft Rhee Section 4.1 */
+#define CUBIC_CA 4
+#define CUBIC_CB 10
+
 static void
-tcp_reno_congestion_exp(struct tcpcb *tp)
+tcp_common_congestion_exp(struct tcpcb *tp, int betaa, int betab)
 {
 	u_int win;
 
 	/* 
-	 * Halve the congestion window and reduce the
-	 * slow start threshold.
+	 * Reduce the congestion window and the slow start threshold.
 	 */
-	win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_segsz;
+	win = min(tp->snd_wnd, tp->snd_cwnd) * betaa / betab / tp->t_segsz;
 	if (win < 2)
 		win = 2;
 
@@ -434,9 +453,20 @@ tcp_reno_congestion_exp(struct tcpcb *tp
 }
 
 
+/* ------------------------------------------------------------------------ */
+
+/*
+ * TCP/Reno congestion control.
+ */
+static void
+tcp_reno_congestion_exp(struct tcpcb *tp)
+{
+
+	tcp_common_congestion_exp(tp, RENO_BETAA, RENO_BETAB);
+}
 
 static int
-tcp_reno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
+tcp_reno_do_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
 {
 	/*
 	 * We know we're losing at the current
@@ -458,10 +488,8 @@ tcp_reno_fast_retransmit(struct tcpcb *t
 	 * irrespective of the number of DupAcks.
 	 */
 	
-	tcp_seq onxt;
-	
-	onxt = tp->snd_nxt;
-	tcp_reno_congestion_exp(tp);
+	tcp_seq onxt = tp->snd_nxt;
+
 	tp->t_partialacks = 0;
 	TCP_TIMER_DISARM(tp, TCPT_REXMT);
 	tp->t_rtttime = 0;
@@ -482,6 +510,14 @@ tcp_reno_fast_retransmit(struct tcpcb *t
 	return 0;
 }
 
+static int
+tcp_reno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
+{
+
+	tcp_reno_congestion_exp(tp);
+	return tcp_reno_do_fast_retransmit(tp, th);
+}
+
 static void
 tcp_reno_slow_retransmit(struct tcpcb *tp)
 {
@@ -521,6 +557,9 @@ tcp_reno_slow_retransmit(struct tcpcb *t
 	tp->t_partialacks = -1;
 	tp->t_dupacks = 0;
 	tp->t_bytes_acked = 0;
+
+	if (TCP_ECN_ALLOWED(tp))
+		tp->t_flags |= TF_ECN_SND_CWR;
 }
 
 static void
@@ -543,6 +582,8 @@ tcp_reno_fast_retransmit_newack(struct t
 		tp->t_partialacks = -1;
 		tp->t_dupacks = 0;
 		tp->t_bytes_acked = 0;
+		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
+			tp->snd_fack = th->th_ack;
 	}
 }
 
@@ -653,6 +694,7 @@ tcp_newreno_fast_retransmit_newack(struc
 		 */
 		tcp_seq onxt = tp->snd_nxt;
 		u_long ocwnd = tp->snd_cwnd;
+		int sack_num_segs = 1, sack_bytes_rxmt = 0;
 
 		/*
 		 * snd_una has not yet been updated and the socket's send
@@ -660,24 +702,52 @@ tcp_newreno_fast_retransmit_newack(struc
 		 * have to leave snd_una as it was to get the correct data
 		 * offset in tcp_output().
 		 */
-		if (++tp->t_partialacks == 1)
-			TCP_TIMER_DISARM(tp, TCPT_REXMT);
+		tp->t_partialacks++;
+		TCP_TIMER_DISARM(tp, TCPT_REXMT);
 		tp->t_rtttime = 0;
 		tp->snd_nxt = th->th_ack;
-		/*
-		 * Set snd_cwnd to one segment beyond ACK'd offset.  snd_una
-		 * is not yet updated when we're called.
-		 */
-		tp->snd_cwnd = tp->t_segsz + (th->th_ack - tp->snd_una);
-		(void) tcp_output(tp);
-		tp->snd_cwnd = ocwnd;
-		if (SEQ_GT(onxt, tp->snd_nxt))
-			tp->snd_nxt = onxt;
-		/*
-		 * Partial window deflation.  Relies on fact that tp->snd_una
-		 * not updated yet.
-		 */
-		tp->snd_cwnd -= (th->th_ack - tp->snd_una - tp->t_segsz);
+
+		if (TCP_SACK_ENABLED(tp)) {
+			/*
+			 * Partial ack handling within a sack recovery episode.
+			 * Keeping this very simple for now. When a partial ack
+			 * is received, force snd_cwnd to a value that will
+			 * allow the sender to transmit no more than 2 segments.
+			 * If necessary, a fancier scheme can be adopted at a
+			 * later point, but for now, the goal is to prevent the
+			 * sender from bursting a large amount of data in the
+			 * midst of sack recovery.
+		 	 */
+
+			/*
+			 * send one or 2 segments based on how much
+			 * new data was acked
+			 */
+			if (((th->th_ack - tp->snd_una) / tp->t_segsz) > 2)
+				sack_num_segs = 2;
+			(void)tcp_sack_output(tp, &sack_bytes_rxmt);
+			tp->snd_cwnd = sack_bytes_rxmt +
+			    (tp->snd_nxt - tp->sack_newdata) +
+			    sack_num_segs * tp->t_segsz;
+			tp->t_flags |= TF_ACKNOW;
+			(void) tcp_output(tp);
+		} else {
+			/*
+			 * Set snd_cwnd to one segment beyond ACK'd offset
+			 * snd_una is not yet updated when we're called
+			 */
+			tp->snd_cwnd = tp->t_segsz + (th->th_ack - tp->snd_una);
+			(void) tcp_output(tp);
+			tp->snd_cwnd = ocwnd;
+			if (SEQ_GT(onxt, tp->snd_nxt))
+				tp->snd_nxt = onxt;
+			/*
+			 * Partial window deflation.  Relies on fact that
+			 * tp->snd_una not updated yet.
+		 	 */
+			tp->snd_cwnd -= (th->th_ack - tp->snd_una -
+			    tp->t_segsz);
+		}
 	} else {
 		/*
 		 * Complete ack.  Inflate the congestion window to ssthresh
@@ -696,6 +766,8 @@ tcp_newreno_fast_retransmit_newack(struc
 		tp->t_partialacks = -1;
 		tp->t_dupacks = 0;
 		tp->t_bytes_acked = 0;
+		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
+			tp->snd_fack = th->th_ack;
 	}
 }
 
@@ -720,4 +792,179 @@ const struct tcp_congctl tcp_newreno_ctl
 	.cong_exp = tcp_reno_congestion_exp,
 };
 
+/*
+ * CUBIC - http://tools.ietf.org/html/draft-rhee-tcpm-cubic-02
+ */
+
+/* Cubic prototypes */
+static void	tcp_cubic_update_ctime(struct tcpcb *tp);
+static uint32_t	tcp_cubic_diff_ctime(struct tcpcb *);
+static uint32_t	tcp_cubic_cbrt(uint32_t);
+static uint32_t	tcp_cubic_getW(struct tcpcb *);
+
+/* Cubic TIME functions - XXX I don't like using timevals and microuptime */
+/*
+ * Set congestion timer to now
+ */
+static void
+tcp_cubic_update_ctime(struct tcpcb *tp)
+{
+	struct timeval now_timeval;
+
+	getmicrouptime(&now_timeval);
+	tp->snd_cubic_ctime = now_timeval.tv_sec * 1000 +
+	    now_timeval.tv_usec / 1000;
+}
+
+/*
+ * miliseconds from last congestion
+ */
+static uint32_t
+tcp_cubic_diff_ctime(struct tcpcb *tp)
+{
+	struct timeval now_timeval;
+
+	getmicrouptime(&now_timeval);
+	return now_timeval.tv_sec * 1000 + now_timeval.tv_usec / 1000 -
+	    tp->snd_cubic_ctime;
+}
 
+/*
+ * Approximate cubic root
+ */
+#define CBRT_ROUNDS 30
+static uint32_t
+tcp_cubic_cbrt(uint32_t v)
+{
+	int i, rounds = CBRT_ROUNDS;
+	uint64_t x = v / 3;
+
+	/* We fail to calculate correct for small numbers */
+	if (v == 0)
+		return 0;
+	else if (v < 4)
+		return 1;
+
+	/*
+	 * largest x that 2*x^3+3*x fits 64bit
+	 * Avoid overflow for a time cost
+	 */
+	if (x > 2097151)
+		rounds += 10;
+
+	for (i = 0; i < rounds; i++)
+		if (rounds == CBRT_ROUNDS)
+			x = (v + 2 * x * x * x) / (3 * x * x);
+		else
+			/* Avoid overflow */
+			x = v / (3 * x * x) + 2 * x / 3;
+
+	return (uint32_t)x;
+}
+
+/* Draft Rhee Section 3.1 - get W(t) */
+static uint32_t
+tcp_cubic_getW(struct tcpcb *tp)
+{
+	uint32_t ms_elapsed = tcp_cubic_diff_ctime(tp);
+	uint32_t K, CtK;
+
+	K = tcp_cubic_cbrt(tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB *
+	    CUBIC_CB / CUBIC_CA);
+	/*  C*(t-K)  */
+	CtK = CUBIC_CA * (ms_elapsed - K) / CUBIC_CB;
+
+	return CtK * CtK * CtK + tp->snd_cubic_wmax;
+}
+
+static void
+tcp_cubic_congestion_exp(struct tcpcb *tp)
+{
+
+	tcp_cubic_update_ctime(tp);
+
+	/* Section 3.6 - Fast Convergence */
+	if (tp->snd_cubic_wmax < tp->snd_cubic_wmax_last) {
+		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
+		tp->snd_cubic_wmax = tp->snd_cubic_wmax / 2 +
+		    tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB / 2;
+	} else {
+		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
+		tp->snd_cubic_wmax = tp->snd_cwnd;
+	}
+	tcp_common_congestion_exp(tp, CUBIC_BETAA, CUBIC_BETAB);
+}
+
+static int
+tcp_cubic_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
+{
+
+	if (SEQ_LT(th->th_ack, tp->snd_high)) {
+		/* See newreno */
+		tp->t_dupacks = 0;
+		return 1;
+	}
+
+	/*
+	 * do CUBIC if not in fast recovery
+	 */
+	if (tp->t_partialacks < 0) {
+		/* Adjust W_max, W_max_last, cwnd and ssthresh */
+	        tcp_cubic_congestion_exp(tp);
+		/* Reno and NewReno FR */
+		return tcp_reno_do_fast_retransmit(tp, th);
+	} else
+		return tcp_reno_fast_retransmit(tp, th);
+}
+
+static void
+tcp_cubic_newack(struct tcpcb *tp, const struct tcphdr *th)
+{
+	uint32_t ms_elapsed, rtt;
+	u_long w_tcp;
+
+	/* Congestion avoidance and not in fast recovery */
+	if (tp->snd_cwnd > tp->snd_ssthresh && tp->t_partialacks < 0) {
+		ms_elapsed = tcp_cubic_diff_ctime(tp);
+
+		rtt = max(hztoms(1), hztoms((tp->t_srtt >> TCP_RTT_SHIFT)));
+
+		/* Compute W_tcp(t) - XXX should use BETA defines */
+		w_tcp = tp->snd_cubic_wmax * 4 / 5 +
+		    ms_elapsed / rtt / 3;
+
+		if (tp->snd_cwnd > w_tcp) {
+			/* Not in TCP mode */
+			tp->snd_cwnd += (tcp_cubic_getW(tp) - tp->snd_cwnd) / 
+			    tp->snd_cwnd;
+		} else {
+			/* friendly TCP mode */
+			tp->snd_cwnd = w_tcp;
+		}
+
+		/* Make sure we are within limits */
+		tp->snd_cwnd = max(tp->snd_cwnd, tp->t_segsz);
+		tp->snd_cwnd = min(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale);
+	} else {
+		/* Use New Reno */
+		tcp_newreno_newack(tp, th);
+	}
+}
+
+static void
+tcp_cubic_slow_retransmit(struct tcpcb *tp)
+{
+
+	/* Reset */
+	tp->snd_cubic_wmax = tp->snd_cubic_wmax_last = tp->snd_cubic_ctime = 0;
+
+	tcp_reno_slow_retransmit(tp);
+}
+
+const struct tcp_congctl tcp_cubic_ctl = {
+	.fast_retransmit = tcp_cubic_fast_retransmit,
+	.slow_retransmit = tcp_cubic_slow_retransmit,
+	.fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
+	.newack = tcp_cubic_newack,
+	.cong_exp = tcp_cubic_congestion_exp,
+};

Index: src/sys/netinet/tcp_congctl.h
diff -u src/sys/netinet/tcp_congctl.h:1.6 src/sys/netinet/tcp_congctl.h:1.7
--- src/sys/netinet/tcp_congctl.h:1.6	Thu Apr 14 15:57:02 2011
+++ src/sys/netinet/tcp_congctl.h	Tue Nov 12 09:02:05 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: tcp_congctl.h,v 1.6 2011/04/14 15:57:02 yamt Exp $	*/
+/*	$NetBSD: tcp_congctl.h,v 1.7 2013/11/12 09:02:05 kefren Exp $	*/
 
 /*
  * Copyright (c) 2006 The NetBSD Foundation, Inc.
@@ -78,6 +78,7 @@ struct tcp_congctl {
 
 extern const struct tcp_congctl tcp_reno_ctl;
 extern const struct tcp_congctl tcp_newreno_ctl;
+extern const struct tcp_congctl tcp_cubic_ctl;
 
 /* currently selected global congestion control */
 extern char tcp_congctl_global_name[TCPCC_MAXLEN];

Index: src/sys/netinet/tcp_input.c
diff -u src/sys/netinet/tcp_input.c:1.329 src/sys/netinet/tcp_input.c:1.330
--- src/sys/netinet/tcp_input.c:1.329	Sun Sep 15 14:42:38 2013
+++ src/sys/netinet/tcp_input.c	Tue Nov 12 09:02:05 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: tcp_input.c,v 1.329 2013/09/15 14:42:38 martin Exp $	*/
+/*	$NetBSD: tcp_input.c,v 1.330 2013/11/12 09:02:05 kefren Exp $	*/
 
 /*
  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
@@ -148,7 +148,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tcp_input.c,v 1.329 2013/09/15 14:42:38 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tcp_input.c,v 1.330 2013/11/12 09:02:05 kefren Exp $");
 
 #include "opt_inet.h"
 #include "opt_ipsec.h"
@@ -2655,12 +2655,8 @@ after_listen:
 		 * If the congestion window was inflated to account
 		 * for the other side's cached packets, retract it.
 		 */
-		/* XXX: make SACK have his own congestion control
-		 * struct -- rpaulo */
-		if (TCP_SACK_ENABLED(tp))
-			tcp_sack_newack(tp, th);
-		else
-			tp->t_congctl->fast_retransmit_newack(tp, th);
+		tp->t_congctl->fast_retransmit_newack(tp, th);
+
 		if (SEQ_GT(th->th_ack, tp->snd_max)) {
 			TCP_STATINC(TCP_STAT_RCVACKTOOMUCH);
 			goto dropafterack;

Index: src/sys/netinet/tcp_sack.c
diff -u src/sys/netinet/tcp_sack.c:1.28 src/sys/netinet/tcp_sack.c:1.29
--- src/sys/netinet/tcp_sack.c:1.28	Mon Jan 30 23:31:27 2012
+++ src/sys/netinet/tcp_sack.c	Tue Nov 12 09:02:05 2013
@@ -1,4 +1,4 @@
-/* $NetBSD: tcp_sack.c,v 1.28 2012/01/30 23:31:27 matt Exp $ */
+/* $NetBSD: tcp_sack.c,v 1.29 2013/11/12 09:02:05 kefren Exp $ */
 
 /*
  * Copyright (c) 2005 The NetBSD Foundation, Inc.
@@ -102,7 +102,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tcp_sack.c,v 1.28 2012/01/30 23:31:27 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tcp_sack.c,v 1.29 2013/11/12 09:02:05 kefren Exp $");
 
 #include "opt_inet.h"
 #include "opt_ipsec.h"
@@ -443,69 +443,6 @@ tcp_free_sackholes(struct tcpcb *tp)
 }
 
 /*
- * Implements the SACK response to a new ack, checking for partial acks
- * in fast recovery.
- */
-void
-tcp_sack_newack(struct tcpcb *tp, const struct tcphdr *th)
-{
-	if (tp->t_partialacks < 0) {
-		/*
-		 * Not in fast recovery.  Reset the duplicate ack
-		 * counter.
-		 */
-		tp->t_dupacks = 0;
-	} else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
-		/*
-		 * Partial ack handling within a sack recovery episode. 
-		 * Keeping this very simple for now. When a partial ack
-		 * is received, force snd_cwnd to a value that will allow
-		 * the sender to transmit no more than 2 segments.
-		 * If necessary, a fancier scheme can be adopted at a 
-		 * later point, but for now, the goal is to prevent the
-		 * sender from bursting a large amount of data in the midst
-		 * of sack recovery.
-		 */
-		int num_segs = 1;
-		int sack_bytes_rxmt = 0;
-
-		tp->t_partialacks++;
-		TCP_TIMER_DISARM(tp, TCPT_REXMT);
-		tp->t_rtttime = 0;
-
-	 	/*
-		 * send one or 2 segments based on how much new data was acked
-		 */
- 		if (((th->th_ack - tp->snd_una) / tp->t_segsz) > 2)
- 			num_segs = 2;
-	 	(void)tcp_sack_output(tp, &sack_bytes_rxmt);
- 		tp->snd_cwnd = sack_bytes_rxmt +
-		    (tp->snd_nxt - tp->sack_newdata) + num_segs * tp->t_segsz;
-  		tp->t_flags |= TF_ACKNOW;
-	  	(void) tcp_output(tp);
-	} else {
-		/*
-		 * Complete ack, inflate the congestion window to
-                 * ssthresh and exit fast recovery.
-		 *
-		 * Window inflation should have left us with approx.
-		 * snd_ssthresh outstanding data.  But in case we
-		 * would be inclined to send a burst, better to do
-		 * it via the slow start mechanism.
-		 */
-		if (SEQ_SUB(tp->snd_max, th->th_ack) < tp->snd_ssthresh)
-			tp->snd_cwnd = SEQ_SUB(tp->snd_max, th->th_ack)
-			    + tp->t_segsz;
-		else
-			tp->snd_cwnd = tp->snd_ssthresh;
-		tp->t_partialacks = -1;
-		tp->t_dupacks = 0;
-		if (SEQ_GT(th->th_ack, tp->snd_fack))
-			tp->snd_fack = th->th_ack;
-	}
-}
-
-/*
  * Returns pointer to a sackhole if there are any pending retransmissions;
  * NULL otherwise.
  */

Index: src/sys/netinet/tcp_subr.c
diff -u src/sys/netinet/tcp_subr.c:1.250 src/sys/netinet/tcp_subr.c:1.251
--- src/sys/netinet/tcp_subr.c:1.250	Wed Jun  5 19:01:26 2013
+++ src/sys/netinet/tcp_subr.c	Tue Nov 12 09:02:05 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: tcp_subr.c,v 1.250 2013/06/05 19:01:26 christos Exp $	*/
+/*	$NetBSD: tcp_subr.c,v 1.251 2013/11/12 09:02:05 kefren Exp $	*/
 
 /*
  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
@@ -91,7 +91,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.250 2013/06/05 19:01:26 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.251 2013/11/12 09:02:05 kefren Exp $");
 
 #include "opt_inet.h"
 #include "opt_ipsec.h"
@@ -963,6 +963,9 @@ static struct tcpcb tcpcb_template = {
 	.snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT,
 	.snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT,
 	.snd_numholes = 0,
+	.snd_cubic_wmax = 0,
+	.snd_cubic_wmax_last = 0,
+	.snd_cubic_ctime = 0,
 
 	.t_partialacks = -1,
 	.t_bytes_acked = 0,

Index: src/sys/netinet/tcp_var.h
diff -u src/sys/netinet/tcp_var.h:1.170 src/sys/netinet/tcp_var.h:1.171
--- src/sys/netinet/tcp_var.h:1.170	Wed Apr 10 00:16:04 2013
+++ src/sys/netinet/tcp_var.h	Tue Nov 12 09:02:05 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: tcp_var.h,v 1.170 2013/04/10 00:16:04 christos Exp $	*/
+/*	$NetBSD: tcp_var.h,v 1.171 2013/11/12 09:02:05 kefren Exp $	*/
 
 /*
  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
@@ -328,6 +328,11 @@ struct tcpcb {
 	tcp_seq snd_fack;		/* FACK TCP.  Forward-most data held by
 					   peer. */
 
+/* CUBIC variables */
+	ulong snd_cubic_wmax;		/* W_max */
+	ulong snd_cubic_wmax_last;	/* Used for fast convergence */
+	ulong snd_cubic_ctime;		/* Last congestion time */
+
 /* pointer for syn cache entries*/
 	LIST_HEAD(, syn_cache) t_sc;	/* list of entries by this tcb */
 
@@ -966,7 +971,6 @@ void	 tcp_del_sackholes(struct tcpcb *, 
 void	 tcp_free_sackholes(struct tcpcb *);
 void	 tcp_sack_adjust(struct tcpcb *tp);
 struct sackhole *tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt);
-void	 tcp_sack_newack(struct tcpcb *, const struct tcphdr *);
 int	 tcp_sack_numblks(const struct tcpcb *);
 #define	TCP_SACK_OPTLEN(nblks)	((nblks) * 8 + 2 + 2)
 

Reply via email to