Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h | 190
1 file changed, 143 insertions(+), 47 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5c472f255b7..a385797f160 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -220,6 +220,7 @@ extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
+extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
@@ -230,6 +231,7 @@ extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
+extern int sysctl_tcp_max_ssthresh;
extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
@@ -341,6 +343,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
extern int tcp_child_process(struct sock *parent,
struct sock *child,
struct sk_buff *skb);
+extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
@@ -417,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
/* tcp_output.c */
-extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
- unsigned int cur_mss, int nonagle);
-extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
+extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+ int nonagle);
+extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
@@ -476,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp)
__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_fast_path_check(struct sock *sk)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+
if (skb_queue_empty(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
@@ -588,10 +593,10 @@ static inline void tcp_dec_pcount_approx(__u32 *count,
}
}
-static inline void tcp_packets_out_inc(struct sock *sk,
- struct tcp_sock *tp,
+static inline void tcp_packets_out_inc(struct sock *sk,
const struct sk_buff *skb)
{
+ struct tcp_sock *tp = tcp_sk(sk);
int orig = tp->packets_out;
tp->packets_out += tcp_skb_pcount(skb);
@@ -624,9 +629,12 @@ enum tcp_ca_event {
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+#define TCP_CONG_NON_RESTRICTED 0x1
+#define TCP_CONG_RTT_STAMP 0x2
+
struct tcp_congestion_ops {
struct list_head list;
- int non_restricted;
+ unsigned long flags;
/* initialize private data (optional) */
void (*init)(struct sock *sk);
@@ -640,8 +648,6 @@ struct tcp_congestion_ops {
/* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack,
u32 rtt, u32 in_flight, int good_ack);
- /* round trip time sample per acked packet (optional) */
- void (*rtt_sample)(struct sock *sk, u32 usrtt);
/* call before changing ca_state (optional) */
void (*set_state)(struct sock *sk, u8 new_state);
/* call when cwnd event occurs (optional) */
@@ -649,7 +655,7 @@ struct tcp_congestion_ops {
/* new value of cwnd after loss (optional) */
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
- void (*pkts_acked)(struct sock *sk, u32 num_acked);
+ void (*pkts_acked)(struct sock *sk, u32 num_acked, ktime_t last);
/* get info for inet_diag (optional) */
void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
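
The three hunks above reshape the congestion-control interface: the non_restricted integer becomes a flags bitmask, the per-packet rtt_sample() hook is removed, and pkts_acked() now also receives a ktime_t timestamp. Below is a minimal, hedged sketch of a module skeleton written against that new interface; the module name "example" and the callback bodies are illustrative assumptions, not part of this diff.

/* Hedged sketch only: a skeleton congestion-control module using the new
 * flags field and the ktime_t-aware pkts_acked() hook from this diff.
 * The name "example" and the callback bodies are illustrative assumptions. */
#include <linux/module.h>
#include <net/tcp.h>

static void example_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
{
	/* 'last' carries the timestamp that replaces the removed
	 * rtt_sample() hook; a real module would derive an RTT here. */
}

static struct tcp_congestion_ops example_ops = {
	.flags		= TCP_CONG_RTT_STAMP,	/* request the timestamp above */
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,	/* required hook */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required hook */
	.pkts_acked	= example_pkts_acked,
};

static int __init example_register(void)
{
	return tcp_register_congestion_control(&example_ops);
}

static void __exit example_unregister(void)
{
	tcp_unregister_congestion_control(&example_ops);
}

module_init(example_register);
module_exit(example_unregister);
MODULE_LICENSE("GPL");
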
@@ -736,7 +742,7 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
tp->left_out = tp->sacked_out + tp->lost_out;
}
-extern void tcp_enter_cwr(struct sock *sk);
+extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
@@ -775,18 +781,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}
-static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_check_probe_timer(struct sock *sk)
{
+ struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
+
if (!tp->packets_out && !icsk->icsk_pending)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
icsk->icsk_rto, TCP_RTO_MAX);
}
-static inline void tcp_push_pending_frames(struct sock *sk,
- struct tcp_sock *tp)
+static inline void tcp_push_pending_frames(struct sock *sk)
{
- __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
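
The hunks above, together with the @@ -417 hunk in tcp_output.c declarations, follow one pattern: helpers that used to take both a struct sock * and a struct tcp_sock * now take only the socket and derive tcp_sk(sk) internally. A hedged sketch of what a caller looks like after the change; example_push() is hypothetical and exists only to show the new calling convention.

/* Hedged illustration of the sk-only calling convention; example_push()
 * is hypothetical and only demonstrates the pattern after this diff. */
#include <net/tcp.h>

static void example_push(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_send_now(sk))		/* was tcp_may_send_now(sk, tp) */
		__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1),
					  tp->nonagle);	/* tp argument dropped */
	else
		tcp_check_probe_timer(sk);	/* was tcp_check_probe_timer(sk, tp) */
}
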
@@ -815,7 +824,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
static inline int tcp_checksum_complete(struct sk_buff *skb)
{
- return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+ return !skb_csum_unnecessary(skb) &&
__tcp_checksum_complete(skb);
}
@@ -918,21 +927,7 @@ static inline void tcp_set_state(struct sock *sk, int state)
#endif
}
-static inline void tcp_done(struct sock *sk)
-{
- if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
-
- tcp_set_state(sk, TCP_CLOSE);
- tcp_clear_xmit_timers(sk);
-
- sk->sk_shutdown = SHUTDOWN_MASK;
-
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
- else
- inet_csk_destroy_sock(sk);
-}
+extern void tcp_done(struct sock *sk);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
@@ -981,7 +976,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
ireq->wscale_ok = rx_opt->wscale_ok;
ireq->acked = 0;
ireq->ecn_ok = 0;
- ireq->rmt_port = skb->h.th->source;
+ ireq->rmt_port = tcp_hdr(skb)->source;
}
extern void tcp_enter_memory_pressure(void);
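
The hunk above replaces the old skb->h.th union access with the tcp_hdr() accessor. A hedged one-liner showing the same accessor in use; example_peer_port() is a hypothetical helper, not part of this diff.

/* Hedged example of the accessor change: read TCP header fields through
 * tcp_hdr(skb) instead of the old skb->h.th pointer. Hypothetical helper. */
#include <net/tcp.h>

static inline __be16 example_peer_port(const struct sk_buff *skb)
{
	return tcp_hdr(skb)->source;	/* network byte order, as before */
}
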
@@ -1011,7 +1006,7 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
{
if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
return 0;
- if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
+ if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
return 0;
/* RST segments are not recommended to carry timestamp,
@@ -1026,26 +1021,13 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
However, we can relax time bounds for RST segments to MSL.
*/
- if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+ if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
return 0;
return 1;
}
#define TCP_CHECK_TIMER(sk) do { } while (0)
-static inline int tcp_use_frto(const struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
-
- /* F-RTO must be activated in sysctl and there must be some
- * unsent new data, and the advertised window should allow
- * sending it.
- */
- return (sysctl_tcp_frto && sk->sk_send_head &&
- !after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
- tp->snd_una + tp->snd_wnd));
-}
-
static inline void tcp_mib_init(void)
{
/* See RFC 2012 */
@@ -1172,6 +1154,120 @@ static inline void tcp_put_md5sig_pool(void)
put_cpu();
}
+/* write queue abstraction */
+static inline void tcp_write_queue_purge(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+ sk_stream_free_skb(sk, skb);
+ sk_stream_mem_reclaim(sk);
+}
+
+static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+{
+ struct sk_buff *skb = sk->sk_write_queue.next;
+ if (skb == (struct sk_buff *) &sk->sk_write_queue)
+ return NULL;
+ return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+{
+ struct sk_buff *skb = sk->sk_write_queue.prev;
+ if (skb == (struct sk_buff *) &sk->sk_write_queue)
+ return NULL;
+ return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+{
+ return skb->next;
+}
+
+#define tcp_for_write_queue(skb, sk) \
+ for (skb = (sk)->sk_write_queue.next; \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
+ skb = skb->next)
+
+#define tcp_for_write_queue_from(skb, sk) \
+ for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
+ skb = skb->next)
+
+static inline struct sk_buff *tcp_send_head(struct sock *sk)
+{
+ return sk->sk_send_head;
+}
+
+static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+{
+ sk->sk_send_head = skb->next;
+ if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+ sk->sk_send_head = NULL;
+}
+
+static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
+{
+ if (sk->sk_send_head == skb_unlinked)
+ sk->sk_send_head = NULL;
+}
+
+static inline void tcp_init_send_head(struct sock *sk)
+{
+ sk->sk_send_head = NULL;
+}
+
+static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+}
+
+static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+ __tcp_add_write_queue_tail(sk, skb);
+
+ /* Queue it, remembering where we must start sending. */
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb;
+}
+
+static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_queue_head(&sk->sk_write_queue, skb);
+}
+
+/* Insert buff after skb on the write queue of sk. */
+static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
+ struct sk_buff *buff,
+ struct sock *sk)
+{
+ __skb_append(skb, buff, &sk->sk_write_queue);
+}
+
+/* Insert skb between prev and next on the write queue of sk. */
+static inline void tcp_insert_write_queue_before(struct sk_buff *new,
+ struct sk_buff *skb,
+ struct sock *sk)
+{
+ __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+}
+
+static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
+{
+ __skb_unlink(skb, &sk->sk_write_queue);
+}
+
+static inline int tcp_skb_is_last(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ return skb->next == (struct sk_buff *)&sk->sk_write_queue;
+}
+
+static inline int tcp_write_queue_empty(struct sock *sk)
+{
+ return skb_queue_empty(&sk->sk_write_queue);
+}
+
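
The block above introduces accessors that hide sk->sk_write_queue and sk->sk_send_head behind a small API. A hedged sketch of how a caller might walk the unsent portion of the queue with these helpers; example_unsent_pcount() and its counting logic are illustrative assumptions only.

/* Hedged sketch: count packets not yet sent using the new write-queue
 * accessors instead of touching sk->sk_write_queue directly. */
#include <net/tcp.h>

static unsigned int example_unsent_pcount(struct sock *sk)
{
	struct sk_buff *skb = tcp_send_head(sk);
	unsigned int pkts = 0;

	if (skb == NULL)		/* everything queued has been sent */
		return 0;

	/* walk from the first unsent segment to the queue tail */
	tcp_for_write_queue_from(skb, sk)
		pkts += tcp_skb_pcount(skb);

	return pkts;
}
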
/* /proc */
enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,