author | Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> | 2007-10-11 17:34:25 -0700
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2007-10-11 17:34:25 -0700
commit | d193594299064d386a2705928cd61ab2ca3d7cee (patch) |
tree | c84b5bde72a9f64e5504a9d05e88c8a5b628854e |
parent | f6fb128d272eb7b0f1a8be78153a724545e28be8 (diff) |
[TCP]: Extract tcp_match_skb_to_sack from sacktag code
This is necessary for an upcoming DSACK bugfix. It also reduces sacktag
length, which is not a sad thing at all... :-)

Notice that out-of-memory now has to be handled at the caller's end.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
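
As a side note, here is a minimal, standalone sketch (not kernel code) of the calling convention the message describes: a helper that answers a yes/no question with 1 or 0 but may also fail with a negative errno, so the caller has to check the error case itself, just as the sacktag loop below checks `in_sack < 0`. The names `match_to_block()` and `struct block` are hypothetical stand-ins for `tcp_match_skb_to_sack()` and its skb arguments.

```c
#include <errno.h>
#include <stdio.h>

struct block {
	unsigned int seq;
	unsigned int end_seq;
};

/* Returns 1 if [seq, end_seq) lies fully inside [start, end), 0 if not,
 * or a negative errno to emulate a fragmentation failure. */
static int match_to_block(const struct block *b, unsigned int start,
			  unsigned int end)
{
	if (b->end_seq < b->seq)	/* malformed entry: simulate -ENOMEM */
		return -ENOMEM;
	return b->seq >= start && b->end_seq <= end;
}

int main(void)
{
	struct block queue[] = { { 100, 200 }, { 200, 300 }, { 400, 350 } };
	unsigned int start = 100, end = 300;
	unsigned int i;

	for (i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		int in_block = match_to_block(&queue[i], start, end);

		if (in_block < 0) {	/* error handled at the caller, as in the patch */
			fprintf(stderr, "stopping: error %d\n", in_block);
			break;
		}
		printf("entry %u in block: %d\n", i, in_block);
	}
	return 0;
}
```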
-rw-r--r-- | net/ipv4/tcp_input.c | 54
1 file changed, 35 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 50047045df6..bd18c252dee 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1181,6 +1181,38 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 	return dup_sack;
 }
 
+/* Check if skb is fully within the SACK block. In presence of GSO skbs,
+ * the incoming SACK may not exactly match but we can find smaller MSS
+ * aligned portion of it that matches. Therefore we might need to fragment
+ * which may fail and creates some hassle (caller must handle error case
+ * returns).
+ */
+int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+			  u32 start_seq, u32 end_seq)
+{
+	int in_sack, err;
+	unsigned int pkt_len;
+
+	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
+		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
+
+	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
+	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
+
+		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
+
+		if (!in_sack)
+			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
+		else
+			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
+		err = tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size);
+		if (err < 0)
+			return err;
+	}
+
+	return in_sack;
+}
+
 static int
 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 {
@@ -1333,25 +1365,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 				break;
 
-			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
-				!before(end_seq, TCP_SKB_CB(skb)->end_seq);
-
-			if (tcp_skb_pcount(skb) > 1 && !in_sack &&
-			    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
-				unsigned int pkt_len;
-
-				in_sack = !after(start_seq,
-						 TCP_SKB_CB(skb)->seq);
-
-				if (!in_sack)
-					pkt_len = (start_seq -
-						   TCP_SKB_CB(skb)->seq);
-				else
-					pkt_len = (end_seq -
-						   TCP_SKB_CB(skb)->seq);
-				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
-					break;
-			}
+			in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
+			if (in_sack < 0)
+				break;
 
 			fack_count += tcp_skb_pcount(skb);
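
For context on the partial-match branch of the new helper: it relies on wrap-safe 32-bit sequence comparisons to decide whether the head of a GSO skb is covered by the SACK block and, if so, where tcp_fragment() should split it. Below is a standalone sketch of just that arithmetic, assuming made-up sequence numbers and omitting the tcp_skb_pcount(skb) > 1 guard; seq_before()/seq_after() mirror the kernel's before()/after() macros but are local to the example.

```c
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe sequence comparisons, modeled on the kernel's before()/after(). */
static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

int main(void)
{
	/* Hypothetical GSO skb covering [1000, 4000) and SACK block [1000, 2460). */
	uint32_t skb_seq = 1000, skb_end_seq = 4000;
	uint32_t start_seq = 1000, end_seq = 2460;
	uint32_t pkt_len;
	int in_sack;

	/* Full match: the skb lies entirely inside the SACK block. */
	in_sack = !seq_after(start_seq, skb_seq) &&
		  !seq_before(end_seq, skb_end_seq);

	if (!in_sack && seq_after(skb_end_seq, start_seq)) {
		/* Partial overlap: split either where the block starts or where it ends. */
		in_sack = !seq_after(start_seq, skb_seq);
		if (!in_sack)
			pkt_len = start_seq - skb_seq;	/* head is not SACKed */
		else
			pkt_len = end_seq - skb_seq;	/* head is SACKed */
		printf("would fragment after %u bytes, in_sack=%d\n", pkt_len, in_sack);
	} else {
		printf("no fragmenting needed, in_sack=%d\n", in_sack);
	}
	return 0;
}
```

With these example numbers the SACKed head is 1460 bytes, i.e. one MSS-sized segment of the GSO skb, which is the portion the helper asks tcp_fragment() to split off so it can be marked inside the sacktag loop.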