author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-10-11 17:36:13 -0700
committer	David S. Miller <davem@davemloft.net>	2007-10-11 17:36:13 -0700
commit	b08d6cb22c777c8c91c16d8e3b8aafc93c98cbd9 (patch)
tree	139b1f2636c42698bd7b0f0ccd61f0e1b8a826ab
parent	f785a8e28b9d103c7473655743b6ac1bc3cd3a58 (diff)
[TCP]: Limit processing lost_retrans loop to work-to-do cases
This addition of lost_retrans_low to tcp_sock might be unnecessary; it's not clear how often the lost_retrans worker is executed when there wasn't work to do.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
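The idea, in isolation: remember the lowest snd_nxt recorded at retransmission time among the retransmissions still outstanding, and skip the whole marking walk until the peer's highest SACKed sequence has advanced past it. Below is a minimal standalone sketch of that gating pattern, not kernel code; the names retrans_entry, scan_and_mark and lowest_retrans_seq are made up for illustration, and the kernel's additional retrans_out and congestion-state checks are omitted.

/*
 * Standalone sketch (illustrative only): gate an expensive scan of
 * outstanding retransmissions behind the lowest "snd_nxt at rxmit time"
 * still unresolved.  before()/after() mirror the kernel's wrap-safe
 * sequence comparisons.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;	/* wrap-safe "seq1 < seq2" */
}
#define after(seq2, seq1)	before(seq1, seq2)

struct retrans_entry {
	uint32_t snd_nxt_at_rxmit;	/* snd_nxt when this skb was retransmitted */
	bool lost;
};

/* Expensive part: walk all outstanding retransmissions, mark as lost those
 * whose retransmission-time snd_nxt is already below what the peer has
 * SACKed, and recompute the lowest such sequence among the survivors. */
static uint32_t scan_and_mark(struct retrans_entry *e, int n, uint32_t received_upto)
{
	uint32_t new_low = 0;
	for (int i = 0; i < n; i++) {
		if (e[i].lost)
			continue;
		if (after(received_upto, e[i].snd_nxt_at_rxmit))
			e[i].lost = true;	/* the retransmission itself was lost */
		else if (!new_low || before(e[i].snd_nxt_at_rxmit, new_low))
			new_low = e[i].snd_nxt_at_rxmit;
	}
	return new_low;
}

int main(void)
{
	struct retrans_entry q[] = {
		{ .snd_nxt_at_rxmit = 1000 },
		{ .snd_nxt_at_rxmit = 2000 },
	};
	uint32_t lowest_retrans_seq = 1000;	/* set when the first rxmit went out */
	uint32_t sack_seqs[] = { 500, 900, 1500, 2500 };

	for (unsigned i = 0; i < sizeof(sack_seqs) / sizeof(sack_seqs[0]); i++) {
		uint32_t highest_sack_end_seq = sack_seqs[i];

		/* The gate added by the patch: skip the scan entirely while the
		 * SACKed data cannot possibly prove any retransmission lost. */
		if (!after(highest_sack_end_seq, lowest_retrans_seq)) {
			printf("SACK up to %u: no work to do\n",
			       (unsigned)highest_sack_end_seq);
			continue;
		}
		lowest_retrans_seq = scan_and_mark(q, 2, highest_sack_end_seq);
		printf("SACK up to %u: scanned, new low %u\n",
		       (unsigned)highest_sack_end_seq,
		       (unsigned)lowest_retrans_seq);
	}
	return 0;
}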
-rw-r--r--	include/linux/tcp.h	|  2 ++
-rw-r--r--	net/ipv4/tcp_input.c	| 14 +++++++++++---
-rw-r--r--	net/ipv4/tcp_output.c	|  2 ++
3 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 9ff456e8d6c..c5b94c1a5ee 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -348,6 +348,8 @@ struct tcp_sock {
int lost_cnt_hint;
int retransmit_cnt_hint;
+ u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
+
u16 advmss; /* Advertised MSS */
u16 prior_ssthresh; /* ssthresh saved at recovery start */
u32 lost_out; /* Lost packets */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d5e0fcc22a3..0a42e934034 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1112,7 +1112,8 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
*
* Search retransmitted skbs from write_queue that were sent when snd_nxt was
* less than what is now known to be received by the other end (derived from
- * SACK blocks by the caller).
+ * SACK blocks by the caller). Also calculate the lowest snd_nxt among the
+ * remaining retransmitted skbs to avoid some costly processing per ACK.
*/
static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
{
@@ -1120,6 +1121,7 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
struct sk_buff *skb;
int flag = 0;
int cnt = 0;
+ u32 new_low_seq = 0;
tcp_for_write_queue(skb, sk) {
u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1151,9 +1153,15 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
}
} else {
+ if (!new_low_seq || before(ack_seq, new_low_seq))
+ new_low_seq = ack_seq;
cnt += tcp_skb_pcount(skb);
}
}
+
+ if (tp->retrans_out)
+ tp->lost_retrans_low = new_low_seq;
+
return flag;
}
@@ -1481,8 +1489,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
}
}
- if (tp->retrans_out && highest_sack_end_seq &&
- after(highest_sack_end_seq, tp->high_seq) &&
+ if (tp->retrans_out &&
+ after(highest_sack_end_seq, tp->lost_retrans_low) &&
icsk->icsk_ca_state == TCP_CA_Recovery)
flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53296753b0b..324b4207254 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1914,6 +1914,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
printk(KERN_DEBUG "retrans_out leaked.\n");
}
#endif
+ if (!tp->retrans_out)
+ tp->lost_retrans_low = tp->snd_nxt;
TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
tp->retrans_out += tcp_skb_pcount(skb);