Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  156
1 file changed, 49 insertions(+), 107 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b14266bb91e..06721392475 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
return err;
}
@@ -288,6 +288,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
mtu = dst_mtu(dst);
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
+ ip_sk_accept_pmtu(sk) &&
inet_csk(sk)->icsk_pmtu_cookie > mtu) {
tcp_sync_mss(sk, mtu);
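
The extra condition gates MTU reductions on ip_sk_accept_pmtu(), so a shrinking route MTU only lowers the cached MSS when the socket is actually willing to accept path-MTU feedback. A self-contained userspace sketch of that gating, assuming the check reduces to the socket's pmtudisc mode (fake_sock, sketch_accept_pmtu and sketch_mtu_reduced are illustrative names, not kernel API):

/* Sketch of the gating added above, with stand-in types and constants. */
#include <stdio.h>
#include <stdbool.h>

enum { PMTUDISC_DONT, PMTUDISC_WANT, PMTUDISC_DO, PMTUDISC_INTERFACE };

struct fake_sock { int pmtudisc; unsigned int pmtu_cookie; };

/* Assumed behaviour: sockets in "interface" mode ignore PMTU feedback. */
static bool sketch_accept_pmtu(const struct fake_sock *sk)
{
	return sk->pmtudisc != PMTUDISC_INTERFACE;
}

static void sketch_mtu_reduced(struct fake_sock *sk, unsigned int mtu)
{
	if (sk->pmtudisc != PMTUDISC_DONT &&
	    sketch_accept_pmtu(sk) &&
	    sk->pmtu_cookie > mtu) {
		sk->pmtu_cookie = mtu;		/* analogue of tcp_sync_mss() */
		printf("shrunk cached MTU to %u\n", mtu);
	} else {
		printf("PMTU update ignored\n");
	}
}

int main(void)
{
	struct fake_sock sk = { PMTUDISC_WANT, 1500 };

	sketch_mtu_reduced(&sk, 1400);		/* accepted */
	sk.pmtudisc = PMTUDISC_INTERFACE;
	sketch_mtu_reduced(&sk, 1300);		/* ignored */
	return 0;
}
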
@@ -835,11 +836,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
skb = tcp_make_synack(sk, dst, req, NULL);
if (skb) {
- __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
+ __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
skb_set_queue_mapping(skb, queue_mapping);
- err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
- ireq->rmt_addr,
+ err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +973,7 @@ static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
{
union tcp_md5_addr *addr;
- addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+ addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
return tcp_md5_do_lookup(sk, addr, AF_INET);
}
@@ -1149,8 +1150,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
saddr = inet_sk(sk)->inet_saddr;
daddr = inet_sk(sk)->inet_daddr;
} else if (req) {
- saddr = inet_rsk(req)->loc_addr;
- daddr = inet_rsk(req)->rmt_addr;
+ saddr = inet_rsk(req)->ir_loc_addr;
+ daddr = inet_rsk(req)->ir_rmt_addr;
} else {
const struct iphdr *iph = ip_hdr(skb);
saddr = iph->saddr;
@@ -1366,8 +1367,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
kfree_skb(skb_synack);
return -1;
}
- err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
- ireq->rmt_addr, ireq->opt);
+ err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr, ireq->opt);
err = net_xmit_eval(err);
if (!err)
tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1410,8 +1411,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
inet_csk(child)->icsk_af_ops->rebuild_header(child);
tcp_init_congestion_control(child);
tcp_mtup_init(child);
- tcp_init_buffer_space(child);
tcp_init_metrics(child);
+ tcp_init_buffer_space(child);
/* Queue the data carried in the SYN packet. We need to first
* bump skb's refcnt because the caller will attempt to free it.
@@ -1502,8 +1503,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_openreq_init(req, &tmp_opt, skb);
ireq = inet_rsk(req);
- ireq->loc_addr = daddr;
- ireq->rmt_addr = saddr;
+ ireq->ir_loc_addr = daddr;
+ ireq->ir_rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(skb);
@@ -1578,15 +1579,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
if (skb_synack) {
- __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+ __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
} else
goto drop_and_free;
if (likely(!do_fastopen)) {
int err;
- err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
- ireq->rmt_addr, ireq->opt);
+ err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr, ireq->opt);
err = net_xmit_eval(err);
if (err || want_cookie)
goto drop_and_free;
@@ -1644,9 +1645,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
- newinet->inet_daddr = ireq->rmt_addr;
- newinet->inet_rcv_saddr = ireq->loc_addr;
- newinet->inet_saddr = ireq->loc_addr;
+ newinet->inet_daddr = ireq->ir_rmt_addr;
+ newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+ newinet->inet_saddr = ireq->ir_loc_addr;
inet_opt = ireq->opt;
rcu_assign_pointer(newinet->inet_opt, inet_opt);
ireq->opt = NULL;
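
Across the patch the inet_request_sock address and port fields gain an ir_ prefix (loc_addr becomes ir_loc_addr, rmt_addr becomes ir_rmt_addr, rmt_port becomes ir_rmt_port); the hunk above then copies those values into the child socket created by tcp_v4_syn_recv_sock(). A minimal sketch of that hand-off using stand-in structures (mini_request and mini_sock are illustrative, not the kernel types):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Stand-ins for inet_request_sock and inet_sock; the field names mirror
 * the renamed kernel fields (ir_loc_addr / ir_rmt_addr / ir_rmt_port). */
struct mini_request { uint32_t ir_loc_addr, ir_rmt_addr; uint16_t ir_rmt_port; };
struct mini_sock    { uint32_t inet_saddr, inet_rcv_saddr, inet_daddr; };

static void sketch_syn_recv_sock(struct mini_sock *child,
				 const struct mini_request *req)
{
	/* Same assignments as in tcp_v4_syn_recv_sock() above. */
	child->inet_daddr     = req->ir_rmt_addr;
	child->inet_rcv_saddr = req->ir_loc_addr;
	child->inet_saddr     = req->ir_loc_addr;
}

int main(void)
{
	struct mini_request req = {
		.ir_loc_addr = inet_addr("192.0.2.1"),
		.ir_rmt_addr = inet_addr("198.51.100.7"),
		.ir_rmt_port = htons(45678),
	};
	struct mini_sock child = { 0 };

	sketch_syn_recv_sock(&child, &req);
	printf("child daddr=%08x saddr=%08x\n",
	       (unsigned)ntohl(child.inet_daddr),
	       (unsigned)ntohl(child.inet_saddr));
	return 0;
}
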
@@ -2194,18 +2195,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
-static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
-{
- return hlist_nulls_empty(head) ? NULL :
- list_entry(head->first, struct inet_timewait_sock, tw_node);
-}
-
-static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
-{
- return !is_a_nulls(tw->tw_node.next) ?
- hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
-}
-
/*
* Get next listener socket follow cur. If cur is NULL, get first socket
* starting from bucket given in st->bucket; when st->bucket is zero the
@@ -2309,10 +2298,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
return rc;
}
-static inline bool empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(const struct tcp_iter_state *st)
{
- return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
- hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+ return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
@@ -2329,7 +2317,6 @@ static void *established_get_first(struct seq_file *seq)
for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
struct sock *sk;
struct hlist_nulls_node *node;
- struct inet_timewait_sock *tw;
spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
/* Lockless fast path for the common case of empty buckets */
@@ -2345,18 +2332,7 @@ static void *established_get_first(struct seq_file *seq)
rc = sk;
goto out;
}
- st->state = TCP_SEQ_STATE_TIME_WAIT;
- inet_twsk_for_each(tw, node,
- &tcp_hashinfo.ehash[st->bucket].twchain) {
- if (tw->tw_family != st->family ||
- !net_eq(twsk_net(tw), net)) {
- continue;
- }
- rc = tw;
- goto out;
- }
spin_unlock_bh(lock);
- st->state = TCP_SEQ_STATE_ESTABLISHED;
}
out:
return rc;
@@ -2365,7 +2341,6 @@ out:
static void *established_get_next(struct seq_file *seq, void *cur)
{
struct sock *sk = cur;
- struct inet_timewait_sock *tw;
struct hlist_nulls_node *node;
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
@@ -2373,45 +2348,16 @@ static void *established_get_next(struct seq_file *seq, void *cur)
++st->num;
++st->offset;
- if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
- tw = cur;
- tw = tw_next(tw);
-get_tw:
- while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
- tw = tw_next(tw);
- }
- if (tw) {
- cur = tw;
- goto out;
- }
- spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
- st->state = TCP_SEQ_STATE_ESTABLISHED;
-
- /* Look for next non empty bucket */
- st->offset = 0;
- while (++st->bucket <= tcp_hashinfo.ehash_mask &&
- empty_bucket(st))
- ;
- if (st->bucket > tcp_hashinfo.ehash_mask)
- return NULL;
-
- spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
- sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
- } else
- sk = sk_nulls_next(sk);
+ sk = sk_nulls_next(sk);
sk_nulls_for_each_from(sk, node) {
if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
- goto found;
+ return sk;
}
- st->state = TCP_SEQ_STATE_TIME_WAIT;
- tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
- goto get_tw;
-found:
- cur = sk;
-out:
- return cur;
+ spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+ ++st->bucket;
+ return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
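
With TIME_WAIT sockets kept on the regular ehash chain (the separate twchain and the TCP_SEQ_STATE_TIME_WAIT iterator state are removed), established_get_next() simply follows the nulls chain and, once it is exhausted, advances the bucket and re-enters established_get_first(). A self-contained toy version of that bucket-walk pattern, without locking or nulls markers (struct node and the bucket array are illustrative only):

#include <stdio.h>
#include <stddef.h>

/* Toy hash table: each bucket is a singly linked list of "sockets".
 * get_next() falls through to get_first() of the next bucket once the
 * current chain is exhausted, as in the rewritten iterator above. */
struct node { int id; struct node *next; };
#define NBUCKETS 4
static struct node *buckets[NBUCKETS];
static int cur_bucket;

static struct node *get_first(void)
{
	for (; cur_bucket < NBUCKETS; cur_bucket++)
		if (buckets[cur_bucket])	/* skip empty buckets */
			return buckets[cur_bucket];
	return NULL;
}

static struct node *get_next(struct node *cur)
{
	if (cur->next)
		return cur->next;
	cur_bucket++;				/* chain done, move on */
	return get_first();
}

int main(void)
{
	struct node a = {1, NULL}, b = {2, &a}, c = {3, NULL};

	buckets[0] = &b;			/* bucket 0: 2 -> 1 */
	buckets[2] = &c;			/* bucket 2: 3      */

	for (struct node *n = get_first(); n; n = get_next(n))
		printf("sock %d (bucket %d)\n", n->id, cur_bucket);
	return 0;
}
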
@@ -2464,10 +2410,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
if (rc)
break;
st->bucket = 0;
+ st->state = TCP_SEQ_STATE_ESTABLISHED;
/* Fallthrough */
case TCP_SEQ_STATE_ESTABLISHED:
- case TCP_SEQ_STATE_TIME_WAIT:
- st->state = TCP_SEQ_STATE_ESTABLISHED;
if (st->bucket > tcp_hashinfo.ehash_mask)
break;
rc = established_get_first(seq);
@@ -2524,7 +2469,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
break;
case TCP_SEQ_STATE_ESTABLISHED:
- case TCP_SEQ_STATE_TIME_WAIT:
rc = established_get_next(seq, v);
break;
}
@@ -2548,7 +2492,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
if (v != SEQ_START_TOKEN)
spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
break;
- case TCP_SEQ_STATE_TIME_WAIT:
case TCP_SEQ_STATE_ESTABLISHED:
if (v)
spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
@@ -2598,18 +2541,18 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
- struct seq_file *f, int i, kuid_t uid, int *len)
+ struct seq_file *f, int i, kuid_t uid)
{
const struct inet_request_sock *ireq = inet_rsk(req);
long delta = req->expires - jiffies;
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
i,
- ireq->loc_addr,
+ ireq->ir_loc_addr,
ntohs(inet_sk(sk)->inet_sport),
- ireq->rmt_addr,
- ntohs(ireq->rmt_port),
+ ireq->ir_rmt_addr,
+ ntohs(ireq->ir_rmt_port),
TCP_SYN_RECV,
0, 0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
@@ -2619,11 +2562,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
0, /* non standard timer */
0, /* open_requests have no inode */
atomic_read(&sk->sk_refcnt),
- req,
- len);
+ req);
}
-static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
+static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
int timer_active;
unsigned long timer_expires;
@@ -2662,7 +2604,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
- "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
+ "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
i, src, srcp, dest, destp, sk->sk_state,
tp->write_seq - tp->snd_una,
rx_queue,
@@ -2679,12 +2621,11 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
tp->snd_cwnd,
sk->sk_state == TCP_LISTEN ?
(fastopenq ? fastopenq->max_qlen : 0) :
- (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
- len);
+ (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
- struct seq_file *f, int i, int *len)
+ struct seq_file *f, int i)
{
__be32 dest, src;
__u16 destp, srcp;
@@ -2696,10 +2637,10 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
srcp = ntohs(tw->tw_sport);
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
- atomic_read(&tw->tw_refcnt), tw, len);
+ atomic_read(&tw->tw_refcnt), tw);
}
#define TMPSZ 150
@@ -2707,11 +2648,11 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
struct tcp_iter_state *st;
- int len;
+ struct sock *sk = v;
+ seq_setwidth(seq, TMPSZ - 1);
if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "%-*s\n", TMPSZ - 1,
- " sl local_address rem_address st tx_queue "
+ seq_puts(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode");
goto out;
@@ -2721,17 +2662,17 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
switch (st->state) {
case TCP_SEQ_STATE_LISTENING:
case TCP_SEQ_STATE_ESTABLISHED:
- get_tcp4_sock(v, seq, st->num, &len);
+ if (sk->sk_state == TCP_TIME_WAIT)
+ get_timewait4_sock(v, seq, st->num);
+ else
+ get_tcp4_sock(v, seq, st->num);
break;
case TCP_SEQ_STATE_OPENREQ:
- get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
- break;
- case TCP_SEQ_STATE_TIME_WAIT:
- get_timewait4_sock(v, seq, st->num, &len);
+ get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
break;
}
- seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
+ seq_pad(seq, '\n');
return 0;
}
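
The %n conversion specifiers and the manual trailing-space printf are replaced by seq_setwidth()/seq_pad(): the show routine declares the target line width once (TMPSZ - 1) and seq_pad() fills the remainder with spaces before the newline, so each record no longer has to track its own length. A rough userspace analogue of that padding scheme (pad_line is a stand-in, not the kernel helper):

#include <stdio.h>
#include <string.h>

#define TMPSZ 150

/* Print a record, then pad with spaces out to a fixed width before the
 * terminator, instead of the old "%n + trailing %*s" trick. */
static void pad_line(char *buf, size_t width, char term)
{
	size_t len = strlen(buf);

	while (len < width)
		buf[len++] = ' ';
	buf[len++] = term;
	buf[len] = '\0';
}

int main(void)
{
	char line[TMPSZ + 2];

	snprintf(line, sizeof(line), "%4d: %08X:%04X %08X:%04X %02X",
		 0, 0x0100007F, 0x0016, 0x0100007F, 0xC350, 1);
	pad_line(line, TMPSZ - 1, '\n');
	fputs(line, stdout);
	return 0;
}
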
@@ -2806,6 +2747,7 @@ struct proto tcp_prot = {
.orphan_count = &tcp_orphan_count,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
+ .sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem = sysctl_tcp_wmem,
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
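
The final hunk points tcp_prot's .sysctl_mem at the global sysctl_tcp_mem limits, the usual three-element min/pressure/max array (counted in pages) that generic socket memory accounting can consult. A toy illustration of a protocol descriptor carrying such a limits array (mini_proto, sketch_tcp_mem and classify are illustrative, not the kernel structures):

#include <stdio.h>

/* Three-element limits array, in the spirit of .sysctl_mem: below
 * limits[0] allocations are always fine, above limits[1] the protocol
 * is under memory pressure, and limits[2] is the hard cap. */
struct mini_proto {
	const char *name;
	long *sysctl_mem;
};

static long sketch_tcp_mem[3] = { 4096, 6144, 8192 };

static struct mini_proto sketch_tcp_proto = {
	.name       = "TCP",
	.sysctl_mem = sketch_tcp_mem,
};

static const char *classify(const struct mini_proto *p, long pages)
{
	if (pages >= p->sysctl_mem[2])
		return "over hard limit";
	if (pages >= p->sysctl_mem[1])
		return "under pressure";
	return "ok";
}

int main(void)
{
	printf("%s at 5000 pages: %s\n", sketch_tcp_proto.name,
	       classify(&sketch_tcp_proto, 5000));
	printf("%s at 7000 pages: %s\n", sketch_tcp_proto.name,
	       classify(&sketch_tcp_proto, 7000));
	return 0;
}
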