author:    Daniel Vetter <daniel.vetter@ffwll.ch>  2014-07-07 10:17:56 +0200
committer: Daniel Vetter <daniel.vetter@ffwll.ch>  2014-07-07 10:17:56 +0200
commit:    f1615bbe9be4def59c3b3eaddb60722efeed16c2
tree:      ca3020e65447576fc1826e819651e6ba072030b5  /net/ipv4/tcp_veno.c
parent:    cfb3c0ab0903abb6ea5215b37eebd9c2a1f057eb
parent:    cd3de83f147601356395b57a8673e9c5ff1e59d1
Merge tag 'v3.16-rc4' into drm-intel-next-queued
Due to Dave's vacation, drm-next hasn't opened yet for 3.17, so I couldn't move my drm-intel-next queue forward like I usually do. Just pull in the latest upstream -rc to unblock patch merging - I don't want to needlessly rebase my current patch pile and void all the testing we've done already.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'net/ipv4/tcp_veno.c')
-rw-r--r--  net/ipv4/tcp_veno.c | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 1b8e28fcd7e..27b9825753d 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
tcp_veno_init(sk);
}
-static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
- u32 in_flight)
+static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk);
if (!veno->doing_veno_now) {
- tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+ tcp_reno_cong_avoid(sk, ack, acked);
return;
}
/* limited by applications */
- if (!tcp_is_cwnd_limited(sk, in_flight))
+ if (!tcp_is_cwnd_limited(sk))
return;
/* We do the Veno calculations only if we got enough rtt samples */
@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
/* We don't have enough rtt samples to do the Veno
* calculation, so we'll behave like Reno.
*/
- tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+ tcp_reno_cong_avoid(sk, ack, acked);
} else {
u64 target_cwnd;
u32 rtt;