Signed-off-by: Ilpo Järvinen <[EMAIL PROTECTED]>
---
 include/linux/tcp.h  |    2 +-
 net/ipv4/tcp_input.c |   75 ++++++++++++++++++++++++++++++-------------------
 2 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 0ec6bb6..3e412f2 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -330,7 +330,7 @@ struct tcp_sock {
        struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
        struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
 
-       struct tcp_sack_block_wire recv_sack_cache[4];
+       struct tcp_sack_block recv_sack_cache[4];
 
        struct sk_buff *highest_sack;   /* highest skb with SACK received
                                         * (validity guaranteed only if
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 39d6a6a..c260642 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1350,9 +1350,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned char *ptr = (skb_transport_header(ack_skb) +
                              TCP_SKB_CB(ack_skb)->sacked);
-       struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+       struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
+       struct tcp_sack_block sp[4];
        struct sk_buff *cached_skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
+       int used_sacks;
        struct tcp_sacktag_state state;
        int found_dup_sack;
        int cached_fack_count;
@@ -1367,7 +1369,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                tp->highest_sack = tcp_write_queue_head(sk);
        }
 
-       found_dup_sack = tcp_check_dsack(tp, ack_skb, sp, num_sacks, prior_snd_una);
+       found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire, num_sacks, prior_snd_una);
        if (found_dup_sack)
                state.flag |= FLAG_DSACKING_ACK;
 
@@ -1378,14 +1380,46 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
                return 0;
 
+       used_sacks = 0;
+       for (i = 0; i < num_sacks; i++) {
+               int dup_sack = !i && found_dup_sack;
+
+               sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
+               sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
+
+               if (!tcp_is_sackblock_valid(tp, dup_sack,
+                                           sp[used_sacks].start_seq,
+                                           sp[used_sacks].end_seq)) {
+                       if (dup_sack) {
+                               if (!tp->undo_marker)
+                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+                               else
+                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+                       } else {
+                               /* Don't count olds caused by ACK reordering */
+                               if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
+                                   !after(sp[used_sacks].end_seq, tp->snd_una))
+                                       continue;
+                               NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+                       }
+                       continue;
+               }
+
+               /* Ignore very old stuff early */
+               if (!after(sp[used_sacks].end_seq, prior_snd_una))
+                       continue;
+
+               used_sacks++;
+       }
+
        /* SACK fastpath:
         * if the only SACK change is the increase of the end_seq of
         * the first block then only apply that SACK block
         * and use retrans queue hinting otherwise slowpath */
        force_one_sack = 1;
-       for (i = 0; i < num_sacks; i++) {
-               __be32 start_seq = sp[i].start_seq;
-               __be32 end_seq = sp[i].end_seq;
+       for (i = 0; i < used_sacks; i++) {
+               u32 start_seq = sp[i].start_seq;
+               u32 end_seq = sp[i].end_seq;
 
                if (i == 0) {
                        if (tp->recv_sack_cache[i].start_seq != start_seq)
@@ -1406,17 +1440,16 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
        state.first_sack_index = 0;
        if (force_one_sack)
-               num_sacks = 1;
+               used_sacks = 1;
        else {
                int j;
                tp->fastpath_skb_hint = NULL;
 
                /* order SACK blocks to allow in order walk of the retrans queue */
-               for (i = num_sacks-1; i > 0; i--) {
+               for (i = used_sacks-1; i > 0; i--) {
                        for (j = 0; j < i; j++){
-                               if (after(ntohl(sp[j].start_seq),
-                                         ntohl(sp[j+1].start_seq))){
-                                       struct tcp_sack_block_wire tmp;
+                               if (after(sp[j].start_seq, sp[j+1].start_seq)) {
+                                       struct tcp_sack_block tmp;
 
                                        tmp = sp[j];
                                        sp[j] = sp[j+1];
@@ -1443,29 +1476,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        state.prior_fackets = tp->fackets_out;
        state.highest_sack_end_seq = 0;
 
-       for (i=0; i<num_sacks; i++, sp++) {
+       for (i=0; i< used_sacks; i++) {
                struct sk_buff *skb;
-               __u32 start_seq = ntohl(sp->start_seq);
-               __u32 end_seq = ntohl(sp->end_seq);
+               u32 start_seq = sp[i].start_seq;
+               u32 end_seq = sp[i].end_seq;
                int fack_count;
                int dup_sack = (found_dup_sack && (i == state.first_sack_index));
 
-               if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
-                       if (dup_sack) {
-                               if (!tp->undo_marker)
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
-                               else
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
-                       } else {
-                               /* Don't count olds caused by ACK reordering */
-                               if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
-                                   !after(end_seq, tp->snd_una))
-                                       continue;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
-                       }
-                       continue;
-               }
-
                skb = cached_skb;
                fack_count = cached_fack_count;
 
-- 
1.5.0.6

-
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to