author     Linus Torvalds <torvalds@linux-foundation.org>   2011-01-06 12:30:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-01-06 12:30:19 -0800
commit     abb359450f20c32ae03039d8736f12b1d561caf5 (patch)
tree       6e8723885feb66a138f19f0ff31615dc13a8d859 /net/ipv4
parent     cb600d2f83c854ec3d6660063e4466431999489b (diff)
parent     4e3dbdb1392a83bd21a6ff8f6bc785495058d37c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1436 commits)
  cassini: Use local-mac-address prom property for Cassini MAC address
  net: remove the duplicate #ifdef __KERNEL__
  net: bridge: check the length of skb after nf_bridge_maybe_copy_header()
  netconsole: clarify stopping message
  netconsole: don't announce stopping if nothing happened
  cnic: Fix the type field in SPQ messages
  netfilter: fix export secctx error handling
  netfilter: fix the race when initializing nf_ct_expect_hash_rnd
  ipv4: IP defragmentation must be ECN aware
  net: r6040: Return proper error for r6040_init_one
  dcb: use after free in dcb_flushapp()
  dcb: unlock on error in dcbnl_ieee_get()
  net: ixp4xx_eth: Return proper error for eth_init_one
  include/linux/if_ether.h: Add #define ETH_P_LINK_CTL for HPNA and wlan local tunnel
  net: add POLLPRI to sock_def_readable()
  af_unix: Avoid socket->sk NULL OOPS in stream connect security hooks.
  net_sched: pfifo_head_drop problem
  mac80211: remove stray extern
  mac80211: implement off-channel TX using hw r-o-c offload
  mac80211: implement hardware offload for remain-on-channel
  ...
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/af_inet.c                                      18
-rw-r--r--   net/ipv4/arp.c                                          31
-rw-r--r--   net/ipv4/devinet.c                                      97
-rw-r--r--   net/ipv4/esp4.c                                         32
-rw-r--r--   net/ipv4/fib_frontend.c                                 34
-rw-r--r--   net/ipv4/fib_semantics.c                                 8
-rw-r--r--   net/ipv4/icmp.c                                         32
-rw-r--r--   net/ipv4/igmp.c                                        282
-rw-r--r--   net/ipv4/inet_connection_sock.c                         22
-rw-r--r--   net/ipv4/inetpeer.c                                    167
-rw-r--r--   net/ipv4/ip_fragment.c                                  36
-rw-r--r--   net/ipv4/ip_gre.c                                       52
-rw-r--r--   net/ipv4/ip_output.c                                    28
-rw-r--r--   net/ipv4/ipconfig.c                                     32
-rw-r--r--   net/ipv4/ipip.c                                         21
-rw-r--r--   net/ipv4/ipmr.c                                         20
-rw-r--r--   net/ipv4/netfilter.c                                     8
-rw-r--r--   net/ipv4/netfilter/Makefile                              6
-rw-r--r--   net/ipv4/netfilter/ipt_REJECT.c                          2
-rw-r--r--   net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c   2
-rw-r--r--   net/ipv4/raw.c                                           7
-rw-r--r--   net/ipv4/route.c                                       252
-rw-r--r--   net/ipv4/syncookies.c                                   15
-rw-r--r--   net/ipv4/sysctl_net_ipv4.c                               7
-rw-r--r--   net/ipv4/tcp.c                                          16
-rw-r--r--   net/ipv4/tcp_input.c                                    51
-rw-r--r--   net/ipv4/tcp_ipv4.c                                     74
-rw-r--r--   net/ipv4/tcp_minisocks.c                                63
-rw-r--r--   net/ipv4/tcp_output.c                                   37
-rw-r--r--   net/ipv4/tcp_probe.c                                     4
-rw-r--r--   net/ipv4/udp.c                                          18
-rw-r--r--   net/ipv4/xfrm4_mode_tunnel.c                             2
-rw-r--r--   net/ipv4/xfrm4_policy.c                                 47
33 files changed, 815 insertions, 708 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f581f77d109..f2b61107df6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1148,21 +1148,13 @@ int inet_sk_rebuild_header(struct sock *sk)
struct flowi fl = {
.oif = sk->sk_bound_dev_if,
.mark = sk->sk_mark,
- .nl_u = {
- .ip4_u = {
- .daddr = daddr,
- .saddr = inet->inet_saddr,
- .tos = RT_CONN_FLAGS(sk),
- },
- },
+ .fl4_dst = daddr,
+ .fl4_src = inet->inet_saddr,
+ .fl4_tos = RT_CONN_FLAGS(sk),
.proto = sk->sk_protocol,
.flags = inet_sk_flowi_flags(sk),
- .uli_u = {
- .ports = {
- .sport = inet->inet_sport,
- .dport = inet->inet_dport,
- },
- },
+ .fl_ip_sport = inet->inet_sport,
+ .fl_ip_dport = inet->inet_dport,
};
security_sk_classify_flow(sk, &fl);
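The fl4_* names above are not new struct fields: this series adds member macros to include/net/flow.h that alias the nested nl_u.ip4_u members, so designated initializers can stay flat while the struct layout is unchanged. A minimal user-space sketch of the same trick, with a made-up flowi_demo type standing in for struct flowi:

#include <stdio.h>

struct flowi_demo {                     /* stand-in for struct flowi */
    int oif;
    union {
        struct {
            unsigned int daddr;
            unsigned int saddr;
            unsigned char tos;
        } ip4_u;
    } nl_u;
};

/* Aliases in the style of include/net/flow.h: ".fl4_dst = x" expands
 * to ".nl_u.ip4_u.daddr = x", so no nested braces are needed. */
#define fl4_dst nl_u.ip4_u.daddr
#define fl4_src nl_u.ip4_u.saddr
#define fl4_tos nl_u.ip4_u.tos

int main(void)
{
    struct flowi_demo fl = {
        .oif = 2,
        .fl4_dst = 0x0a000001,
        .fl4_src = 0x0a000002,
        .fl4_tos = 0x10,
    };

    printf("daddr=%#x saddr=%#x tos=%#x\n",
           fl.nl_u.ip4_u.daddr, fl.fl4_src, (unsigned)fl.fl4_tos);
    return 0;
}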
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index d8e540c5b07..a2fc7b961db 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -433,8 +433,8 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
{
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
- .saddr = tip } } };
+ struct flowi fl = { .fl4_dst = sip,
+ .fl4_src = tip };
struct rtable *rt;
int flag = 0;
/*unsigned long now; */
@@ -883,7 +883,7 @@ static int arp_process(struct sk_buff *skb)
dont_send = arp_ignore(in_dev, sip, tip);
if (!dont_send && IN_DEV_ARPFILTER(in_dev))
- dont_send |= arp_filter(sip, tip, dev);
+ dont_send = arp_filter(sip, tip, dev);
if (!dont_send) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n) {
@@ -1017,13 +1017,14 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
return 0;
}
- if (__in_dev_get_rtnl(dev)) {
- IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
+ if (__in_dev_get_rcu(dev)) {
+ IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
return 0;
}
return -ENXIO;
}
+/* must be called with rcu_read_lock() */
static int arp_req_set_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
@@ -1033,7 +1034,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
if (mask && mask != htonl(0xFFFFFFFF))
return -EINVAL;
if (!dev && (r->arp_flags & ATF_COM)) {
- dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
+ dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
r->arp_ha.sa_data);
if (!dev)
return -ENODEV;
@@ -1061,8 +1062,8 @@ static int arp_req_set(struct net *net, struct arpreq *r,
if (r->arp_flags & ATF_PERM)
r->arp_flags |= ATF_COM;
if (dev == NULL) {
- struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
- .tos = RTO_ONLINK } };
+ struct flowi fl = { .fl4_dst = ip,
+ .fl4_tos = RTO_ONLINK };
struct rtable *rt;
err = ip_route_output_key(net, &rt, &fl);
if (err != 0)
@@ -1169,8 +1170,8 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
if (dev == NULL) {
- struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
- .tos = RTO_ONLINK } };
+ struct flowi fl = { .fl4_dst = ip,
+ .fl4_tos = RTO_ONLINK };
struct rtable *rt;
err = ip_route_output_key(net, &rt, &fl);
if (err != 0)
@@ -1225,10 +1226,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!(r.arp_flags & ATF_NETMASK))
((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
htonl(0xFFFFFFFFUL);
- rtnl_lock();
+ rcu_read_lock();
if (r.arp_dev[0]) {
err = -ENODEV;
- dev = __dev_get_by_name(net, r.arp_dev);
+ dev = dev_get_by_name_rcu(net, r.arp_dev);
if (dev == NULL)
goto out;
@@ -1252,12 +1253,12 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
break;
case SIOCGARP:
err = arp_req_get(&r, dev);
- if (!err && copy_to_user(arg, &r, sizeof(r)))
- err = -EFAULT;
break;
}
out:
- rtnl_unlock();
+ rcu_read_unlock();
+ if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
+ err = -EFAULT;
return err;
}
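The SIOCGARP rearrangement above works because arp_req_get() only fills the local struct arpreq; the possibly-faulting copy_to_user() is moved after rcu_read_unlock(), where sleeping is allowed, letting the RTNL mutex be replaced by the cheaper RCU read lock. A user-space analogue of the same pattern (snapshot under the lock, blocking work after), with a pthread rwlock and write() as stand-ins for RCU and copy_to_user():

#include <pthread.h>
#include <string.h>
#include <unistd.h>

struct arp_entry {
    char dev[16];
    unsigned char ha[6];
};

static struct arp_entry table_entry;   /* shared, updated by writers */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static int get_entry(int fd)
{
    struct arp_entry snap;

    /* Only cheap work while the lock is held: copy into a local. */
    pthread_rwlock_rdlock(&table_lock);
    snap = table_entry;
    pthread_rwlock_unlock(&table_lock);

    /* Blocking work (like copy_to_user(), which may fault and sleep)
     * runs only after the critical section ends. */
    if (write(fd, &snap, sizeof(snap)) != sizeof(snap))
        return -1;
    return 0;
}

int main(void)
{
    pthread_rwlock_wrlock(&table_lock);
    strcpy(table_entry.dev, "eth0");
    pthread_rwlock_unlock(&table_lock);
    return get_entry(STDOUT_FILENO);
}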
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index dc94b0316b7..748cb5b337b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1256,6 +1256,87 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
+static size_t inet_get_link_af_size(const struct net_device *dev)
+{
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
+
+ if (!in_dev)
+ return 0;
+
+ return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
+}
+
+static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
+ struct nlattr *nla;
+ int i;
+
+ if (!in_dev)
+ return -ENODATA;
+
+ nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
+ if (nla == NULL)
+ return -EMSGSIZE;
+
+ for (i = 0; i < IPV4_DEVCONF_MAX; i++)
+ ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
+
+ return 0;
+}
+
+static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
+ [IFLA_INET_CONF] = { .type = NLA_NESTED },
+};
+
+static int inet_validate_link_af(const struct net_device *dev,
+ const struct nlattr *nla)
+{
+ struct nlattr *a, *tb[IFLA_INET_MAX+1];
+ int err, rem;
+
+ if (dev && !__in_dev_get_rtnl(dev))
+ return -EAFNOSUPPORT;
+
+ err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[IFLA_INET_CONF]) {
+ nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
+ int cfgid = nla_type(a);
+
+ if (nla_len(a) < 4)
+ return -EINVAL;
+
+ if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+{
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
+ struct nlattr *a, *tb[IFLA_INET_MAX+1];
+ int rem;
+
+ if (!in_dev)
+ return -EAFNOSUPPORT;
+
+ if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
+ BUG();
+
+ if (tb[IFLA_INET_CONF]) {
+ nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
+ ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_SYSCTL
static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -1349,9 +1430,9 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
return ret;
}
-int ipv4_doint_and_flush(ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int ipv4_doint_and_flush(ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
@@ -1619,6 +1700,14 @@ static __net_initdata struct pernet_operations devinet_ops = {
.exit = devinet_exit_net,
};
+static struct rtnl_af_ops inet_af_ops = {
+ .family = AF_INET,
+ .fill_link_af = inet_fill_link_af,
+ .get_link_af_size = inet_get_link_af_size,
+ .validate_link_af = inet_validate_link_af,
+ .set_link_af = inet_set_link_af,
+};
+
void __init devinet_init(void)
{
register_pernet_subsys(&devinet_ops);
@@ -1626,6 +1715,8 @@ void __init devinet_init(void)
register_gifconf(PF_INET, inet_gifconf);
register_netdevice_notifier(&ip_netdev_notifier);
+ rtnl_af_register(&inet_af_ops);
+
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
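Note the indexing convention in the new link_af ops: inet_fill_link_af() dumps the whole devconf table as one flat array of IPV4_DEVCONF_MAX u32 slots, while validate/set walk nested attributes whose nla_type() is the 1-based config id (hence the cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX check). A small sketch of that convention, assuming ipv4_devconf_set() maps id i to 0-based slot i-1, with made-up sizes:

#include <stdio.h>

#define DEVCONF_MAX 3                   /* stand-in for IPV4_DEVCONF_MAX */

static unsigned int cnf_data[DEVCONF_MAX];  /* 0-based storage */

/* Netlink attribute types are 1-based, array slots are 0-based. */
static int devconf_set(int cfgid, unsigned int val)
{
    if (cfgid <= 0 || cfgid > DEVCONF_MAX)  /* same bounds check */
        return -1;
    cnf_data[cfgid - 1] = val;
    return 0;
}

int main(void)
{
    devconf_set(1, 7);                  /* ok: first config knob */
    devconf_set(0, 9);                  /* rejected: ids start at 1 */
    devconf_set(DEVCONF_MAX + 1, 9);    /* rejected: past the table */

    for (int i = 0; i < DEVCONF_MAX; i++)
        printf("data[%d] = %u\n", i, cnf_data[i]);
    return 0;
}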
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 14ca1f1c3fb..e42a905180f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,8 @@ struct esp_skb_cb {
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
+
/*
* Allocate an AEAD request structure with extra space for SG and IV.
*
@@ -117,25 +119,35 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int blksize;
int clen;
int alen;
+ int plen;
+ int tfclen;
int nfrags;
/* skb is pure payload to encrypt */
err = -ENOMEM;
- /* Round to block size */
- clen = skb->len;
-
esp = x->data;
aead = esp->aead;
alen = crypto_aead_authsize(aead);
+ tfclen = 0;
+ if (x->tfcpad) {
+ struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+ u32 padto;
+
+ padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
+ if (skb->len < padto)
+ tfclen = padto - skb->len;
+ }
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
- clen = ALIGN(clen + 2, blksize);
+ clen = ALIGN(skb->len + 2 + tfclen, blksize);
if (esp->padlen)
clen = ALIGN(clen, esp->padlen);
+ plen = clen - skb->len - tfclen;
- if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+ err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
+ if (err < 0)
goto error;
nfrags = err;
@@ -150,13 +162,17 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
/* Fill padding... */
tail = skb_tail_pointer(trailer);
+ if (tfclen) {
+ memset(tail, 0, tfclen);
+ tail += tfclen;
+ }
do {
int i;
- for (i=0; i<clen-skb->len - 2; i++)
+ for (i = 0; i < plen - 2; i++)
tail[i] = i + 1;
} while (0);
- tail[clen - skb->len - 2] = (clen - skb->len) - 2;
- tail[clen - skb->len - 1] = *skb_mac_header(skb);
+ tail[plen - 2] = plen - 2;
+ tail[plen - 1] = *skb_mac_header(skb);
pskb_put(skb, trailer, clen - skb->len + alen);
skb_push(skb, -skb_network_offset(skb));
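The new tfclen/plen bookkeeping above splits the old "clen - skb->len" remainder into explicit traffic-flow-confidentiality padding (tfclen, zero bytes) and block-alignment padding (plen, ending in the pad-length and next-header bytes). A worked computation of those quantities under assumed sizes (40-byte payload, 16-byte cipher blocks, 64-byte TFC target):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned int len = 40;      /* skb->len: payload to encrypt */
    unsigned int blksize = 16;  /* cipher block size */
    unsigned int padto = 64;    /* x->tfcpad target, already MTU-clamped */
    unsigned int tfclen = 0, clen, plen;

    if (len < padto)            /* TFC: pad short packets up to padto */
        tfclen = padto - len;

    /* +2 leaves room for the pad-length and next-header trailer bytes */
    clen = ALIGN(len + 2 + tfclen, blksize);
    plen = clen - len - tfclen;

    printf("tfclen=%u clen=%u plen=%u\n", tfclen, clen, plen);
    /* -> tfclen=24, clen=80, plen=16: 24 zero bytes of TFC padding,
     * then 14 pad bytes + pad-length + next-header fill the block. */
    return 0;
}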
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c19c1f739fb..1d2cdd43a87 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -158,11 +158,7 @@ static void fib_flush(struct net *net)
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
struct flowi fl = {
- .nl_u = {
- .ip4_u = {
- .daddr = addr
- }
- },
+ .fl4_dst = addr,
};
struct fib_result res = { 0 };
struct net_device *dev = NULL;
@@ -199,7 +195,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
const struct net_device *dev,
__be32 addr)
{
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
+ struct flowi fl = { .fl4_dst = addr };
struct fib_result res;
unsigned ret = RTN_BROADCAST;
struct fib_table *local_table;
@@ -253,13 +249,9 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
{
struct in_device *in_dev;
struct flowi fl = {
- .nl_u = {
- .ip4_u = {
- .daddr = src,
- .saddr = dst,
- .tos = tos
- }
- },
+ .fl4_dst = src,
+ .fl4_src = dst,
+ .fl4_tos = tos,
.mark = mark,
.iif = oif
};
@@ -859,13 +851,9 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
struct fib_result res;
struct flowi fl = {
.mark = frn->fl_mark,
- .nl_u = {
- .ip4_u = {
- .daddr = frn->fl_addr,
- .tos = frn->fl_tos,
- .scope = frn->fl_scope
- }
- }
+ .fl4_dst = frn->fl_addr,
+ .fl4_tos = frn->fl_tos,
+ .fl4_scope = frn->fl_scope,
};
#ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1005,7 +993,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
rt_cache_flush(dev_net(dev), 0);
break;
case NETDEV_UNREGISTER_BATCH:
- rt_cache_flush_batch();
+ /* The batch unregister is only called on the first
+ * device in the list of devices being unregistered.
+ * Therefore we should not pass dev_net(dev) in here.
+ */
+ rt_cache_flush_batch(NULL);
break;
}
return NOTIFY_DONE;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3e0da3ef611..12d3dc3df1b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -563,12 +563,8 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
rcu_read_lock();
{
struct flowi fl = {
- .nl_u = {
- .ip4_u = {
- .daddr = nh->nh_gw,
- .scope = cfg->fc_scope + 1,
- },
- },
+ .fl4_dst = nh->nh_gw,
+ .fl4_scope = cfg->fc_scope + 1,
.oif = nh->nh_oif,
};
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e5d1a44bcbd..4aa1b7f01ea 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -386,10 +386,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
daddr = icmp_param->replyopts.faddr;
}
{
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = daddr,
- .saddr = rt->rt_spec_dst,
- .tos = RT_TOS(ip_hdr(skb)->tos) } },
+ struct flowi fl = { .fl4_dst = daddr,
+ .fl4_src = rt->rt_spec_dst,
+ .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
.proto = IPPROTO_ICMP };
security_skb_classify_flow(skb, &fl);
if (ip_route_output_key(net, &rt, &fl))
@@ -506,8 +505,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
struct net_device *dev = NULL;
rcu_read_lock();
- if (rt->fl.iif &&
- net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
+ if (rt_is_input_route(rt) &&
+ net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
dev = dev_get_by_index_rcu(net, rt->fl.iif);
if (dev)
@@ -542,22 +541,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
struct flowi fl = {
- .nl_u = {
- .ip4_u = {
- .daddr = icmp_param.replyopts.srr ?
- icmp_param.replyopts.faddr :
- iph->saddr,
- .saddr = saddr,
- .tos = RT_TOS(tos)
- }
- },
+ .fl4_dst = icmp_param.replyopts.srr ?
+ icmp_param.replyopts.faddr : iph->saddr,
+ .fl4_src = saddr,
+ .fl4_tos = RT_TOS(tos),
.proto = IPPROTO_ICMP,
- .uli_u = {
- .icmpt = {
- .type = type,
- .code = code
- }
- }
+ .fl_icmp_type = type,
+ .fl_icmp_code = code,
};
int err;
struct rtable *rt2;
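rt_is_input_route()/rt_is_output_route(), used here and in the igmp and ip_gre hunks below, wrap the old open-coded rt->fl.iif tests behind named predicates, so later reshuffles of struct rtable only touch one place. Judging from the tests they replace, they presumably boil down to something like this user-space sketch (demo types are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct flowi_demo { int iif; };                 /* stand-in for rt->fl */
struct rtable_demo { struct flowi_demo fl; };   /* stand-in for struct rtable */

/* Assumed shape of the helpers: name the test once instead of
 * open-coding "rt->fl.iif == 0" at every call site. */
static bool rt_is_input_route(const struct rtable_demo *rt)
{
    return rt->fl.iif != 0;
}

static bool rt_is_output_route(const struct rtable_demo *rt)
{
    return rt->fl.iif == 0;
}

int main(void)
{
    struct rtable_demo in = { .fl = { .iif = 3 } };
    struct rtable_demo out = { .fl = { .iif = 0 } };

    printf("in: input=%d  out: output=%d\n",
           rt_is_input_route(&in), rt_is_output_route(&out));
    return 0;
}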
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 3c53c2d89e3..e0e77e297de 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -149,21 +149,37 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
int sfcount, __be32 *psfsrc, int delta);
+
+static void ip_mc_list_reclaim(struct rcu_head *head)
+{
+ kfree(container_of(head, struct ip_mc_list, rcu));
+}
+
static void ip_ma_put(struct ip_mc_list *im)
{
if (atomic_dec_and_test(&im->refcnt)) {
in_dev_put(im->interface);
- kfree(im);
+ call_rcu(&im->rcu, ip_mc_list_reclaim);
}
}
+#define for_each_pmc_rcu(in_dev, pmc) \
+ for (pmc = rcu_dereference(in_dev->mc_list); \
+ pmc != NULL; \
+ pmc = rcu_dereference(pmc->next_rcu))
+
+#define for_each_pmc_rtnl(in_dev, pmc) \
+ for (pmc = rtnl_dereference(in_dev->mc_list); \
+ pmc != NULL; \
+ pmc = rtnl_dereference(pmc->next_rcu))
+
#ifdef CONFIG_IP_MULTICAST
/*
* Timer management
*/
-static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
+static void igmp_stop_timer(struct ip_mc_list *im)
{
spin_lock_bh(&im->lock);
if (del_timer(&im->timer))
@@ -284,6 +300,8 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
return scount;
}
+#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
+
static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
{
struct sk_buff *skb;
@@ -292,14 +310,20 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct igmpv3_report *pig;
struct net *net = dev_net(dev);
- skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
- if (skb == NULL)
- return NULL;
+ while (1) {
+ skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (skb)
+ break;
+ size >>= 1;
+ if (size < 256)
+ return NULL;
+ }
+ igmp_skb_size(skb) = size;
{
struct flowi fl = { .oif = dev->ifindex,
- .nl_u = { .ip4_u = {
- .daddr = IGMPV3_ALL_MCR } },
+ .fl4_dst = IGMPV3_ALL_MCR,
.proto = IPPROTO_IGMP };
if (ip_route_output_key(net, &rt, &fl)) {
kfree_skb(skb);
@@ -384,7 +408,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
return skb;
}
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
+#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
skb_tailroom(skb)) : 0)
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
@@ -502,8 +526,8 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
int type;
if (!pmc) {
- read_lock(&in_dev->mc_list_lock);
- for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, pmc) {
if (pmc->multiaddr == IGMP_ALL_HOSTS)
continue;
spin_lock_bh(&pmc->lock);
@@ -514,7 +538,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
skb = add_grec(skb, pmc, type, 0, 0);
spin_unlock_bh(&pmc->lock);
}
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
} else {
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +580,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
struct sk_buff *skb = NULL;
int type, dtype;
- read_lock(&in_dev->mc_list_lock);
+ rcu_read_lock();
spin_lock_bh(&in_dev->mc_tomb_lock);
/* deleted MCA's */
@@ -593,7 +617,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
spin_unlock_bh(&in_dev->mc_tomb_lock);
/* change recs */
- for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rcu(in_dev, pmc) {
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE]) {
type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +640,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
}
spin_unlock_bh(&pmc->lock);
}
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
if (!skb)
return;
@@ -644,7 +668,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
{
struct flowi fl = { .oif = dev->ifindex,
- .nl_u = { .ip4_u = { .daddr = dst } },
+ .fl4_dst = dst,
.proto = IPPROTO_IGMP };
if (ip_route_output_key(net, &rt, &fl))
return -1;
@@ -813,14 +837,14 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
if (group == IGMP_ALL_HOSTS)
return;
- read_lock(&in_dev->mc_list_lock);
- for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, im) {
if (im->multiaddr == group) {
igmp_stop_timer(im);
break;
}
}
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
}
static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +930,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
* - Use the igmp->igmp_code field as the maximum
* delay possible
*/
- read_lock(&in_dev->mc_list_lock);
- for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, im) {
int changed;
if (group && group != im->multiaddr)
@@ -925,7 +949,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
if (changed)
igmp_mod_timer(im, max_delay);
}
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
}
/* called in rcu_read_lock() section */
@@ -961,7 +985,7 @@ int igmp_rcv(struct sk_buff *skb)
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
/* Is it our report looped back? */
- if (skb_rtable(skb)->fl.iif == 0)
+ if (rt_is_output_route(skb_rtable(skb)))
break;
/* don't rely on MC router hearing unicast reports */
if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1110,8 +1134,8 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
kfree(pmc);
}
/* clear dead sources, too */
- read_lock(&in_dev->mc_list_lock);
- for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, pmc) {
struct ip_sf_list *psf, *psf_next;
spin_lock_bh(&pmc->lock);
@@ -1123,7 +1147,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
kfree(psf);
}
}
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
}
#endif
@@ -1209,7 +1233,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
- for (im=in_dev->mc_list; im; im=im->next) {
+ for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == addr) {
im->users++;
ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1241,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
}
}
- im = kmalloc(sizeof(*im), GFP_KERNEL);
+ im = kzalloc(sizeof(*im), GFP_KERNEL);
if (!im)
goto out;
@@ -1227,26 +1251,18 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
im->multiaddr = addr;
/* initial mode is (EX, empty) */
im->sfmode = MCAST_EXCLUDE;
- im->sfcount[MCAST_INCLUDE] = 0;
im->sfcount[MCAST_EXCLUDE] = 1;
- im->sources = NULL;
- im->tomb = NULL;
- im->crcount = 0;
atomic_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
- im->tm_running = 0;
setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
im->unsolicit_count = IGMP_Unsolicited_Report_Count;
- im->reporter = 0;
- im->gsquery = 0;
#endif
- im->loaded = 0;
- write_lock_bh(&in_dev->mc_list_lock);
- im->next = in_dev->mc_list;
- in_dev->mc_list = im;
+
+ im->next_rcu = in_dev->mc_list;
in_dev->mc_count++;
- write_unlock_bh(&in_dev->mc_list_lock);
+ rcu_assign_pointer(in_dev->mc_list, im);
+
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, im->multiaddr);
#endif
@@ -1260,26 +1276,32 @@ EXPORT_SYMBOL(ip_mc_inc_group);
/*
* Resend IGMP JOIN report; used for bonding.
+ * Called with rcu_read_lock()
*/
-void ip_mc_rejoin_group(struct ip_mc_list *im)
+void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
- struct in_device *in_dev = im->interface;
+ struct ip_mc_list *im;
+ int type;
- if (im->multiaddr == IGMP_ALL_HOSTS)
- return;
+ for_each_pmc_rcu(in_dev, im) {
+ if (im->multiaddr == IGMP_ALL_HOSTS)
+ continue;
- /* a failover is happening and switches
- * must be notified immediately */
- if (IGMP_V1_SEEN(in_dev))
- igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
- else if (IGMP_V2_SEEN(in_dev))
- igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
- else
- igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
+ /* a failover is happening and switches
+ * must be notified immediately
+ */
+ if (IGMP_V1_SEEN(in_dev))
+ type = IGMP_HOST_MEMBERSHIP_REPORT;
+ else if (IGMP_V2_SEEN(in_dev))
+ type = IGMPV2_HOST_MEMBERSHIP_REPORT;
+ else
+ type = IGMPV3_HOST_MEMBERSHIP_REPORT;
+ igmp_send_report(in_dev, im, type);
+ }
#endif
}
-EXPORT_SYMBOL(ip_mc_rejoin_group);
+EXPORT_SYMBOL(ip_mc_rejoin_groups);
/*
* A socket has left a multicast group on device dev
@@ -1287,17 +1309,18 @@ EXPORT_SYMBOL(ip_mc_rejoin_group);
void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
{
- struct ip_mc_list *i, **ip;
+ struct ip_mc_list *i;
+ struct ip_mc_list __rcu **ip;
ASSERT_RTNL();
- for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
+ for (ip = &in_dev->mc_list;
+ (i = rtnl_dereference(*ip)) != NULL;
+ ip = &i->next_rcu) {
if (i->multiaddr == addr) {
if (--i->users == 0) {
- write_lock_bh(&in_dev->mc_list_lock);
- *ip = i->next;
+ *ip = i->next_rcu;
in_dev->mc_count--;
- write_unlock_bh(&in_dev->mc_list_lock);
igmp_group_dropped(i);
if (!in_dev->dead)
@@ -1316,34 +1339,34 @@ EXPORT_SYMBOL(ip_mc_dec_group);
void ip_mc_unmap(struct in_device *in_dev)
{
- struct ip_mc_list *i;
+ struct ip_mc_list *pmc;
ASSERT_RTNL();
- for (i = in_dev->mc_list; i; i = i->next)
- igmp_group_dropped(i);
+ for_each_pmc_rtnl(in_dev, pmc)
+ igmp_group_dropped(pmc);
}
void ip_mc_remap(struct in_device *in_dev)
{
- struct ip_mc_list *i;
+ struct ip_mc_list *pmc;
ASSERT_RTNL();
- for (i = in_dev->mc_list; i; i = i->next)
- igmp_group_added(i);
+ for_each_pmc_rtnl(in_dev, pmc)
+ igmp_group_added(pmc);
}
/* Device going down */
void ip_mc_down(struct in_device *in_dev)
{
- struct ip_mc_list *i;
+ struct ip_mc_list *pmc;
ASSERT_RTNL();
- for (i=in_dev->mc_list; i; i=i->next)
- igmp_group_dropped(i);
+ for_each_pmc_rtnl(in_dev, pmc)
+ igmp_group_dropped(pmc);
#ifdef CONFIG_IP_MULTICAST
in_dev->mr_ifc_count = 0;
@@ -1374,7 +1397,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
#endif
- rwlock_init(&in_dev->mc_list_lock);
spin_lock_init(&in_dev->mc_tomb_lock);
}
@@ -1382,14 +1404,14 @@ void ip_mc_init_dev(struct in_device *in_dev)
void ip_mc_up(struct in_device *in_dev)
{
- struct ip_mc_list *i;
+ struct ip_mc_list *pmc;
ASSERT_RTNL();
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
- for (i=in_dev->mc_list; i; i=i->next)
- igmp_group_added(i);
+ for_each_pmc_rtnl(in_dev, pmc)
+ igmp_group_added(pmc);
}
/*
@@ -1405,24 +1427,19 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
/* Deactivate timers */
ip_mc_down(in_dev);
- write_lock_bh(&in_dev->mc_list_lock);
- while ((i = in_dev->mc_list) != NULL) {
- in_dev->mc_list = i->next;
+ while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
+ in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
- write_unlock_bh(&in_dev->mc_list_lock);
+
igmp_group_dropped(i);
ip_ma_put(i);
-
- write_lock_bh(&in_dev->mc_list_lock);
}
- write_unlock_bh(&in_dev->mc_list_lock);
}
/* RTNL is locked */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = imr->imr_multiaddr.s_addr } } };
+ struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
struct rtable *rt;
struct net_device *dev = NULL;
struct in_device *idev = NULL;
@@ -1513,18 +1530,18 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
if (!in_dev)
return -ENODEV;
- read_lock(&in_dev->mc_list_lock);
- for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, pmc) {
if (*pmca == pmc->multiaddr)
break;
}
if (!pmc) {
/* MCA not found?? bug */
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
return -ESRCH;
}
spin_lock_bh(&pmc->lock);
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc);
#endif
@@ -1685,18 +1702,18 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
if (!in_dev)
return -ENODEV;
- read_lock(&in_dev->mc_list_lock);
- for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, pmc) {
if (*pmca == pmc->multiaddr)
break;
}
if (!pmc) {
/* MCA not found?? bug */
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
return -ESRCH;
}
spin_lock_bh(&pmc->lock);
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc);
@@ -1793,7 +1810,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
err = -EADDRINUSE;
ifindex = imr->imr_ifindex;
- for (i = inet->mc_list; i; i = i->next) {
+ for_each_pmc_rtnl(inet, i) {
if (i->multi.imr_multiaddr.s_addr == addr &&
i->multi.imr_ifindex == ifindex)
goto done;
@@ -1807,7 +1824,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
goto done;
memcpy(&iml->multi, imr, sizeof(*imr));
- iml->next = inet->mc_list;
+ iml->next_rcu = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1838,14 @@ EXPORT_SYMBOL(ip_mc_join_group);
static void ip_sf_socklist_reclaim(struct rcu_head *rp)
{
- struct ip_sf_socklist *psf;
-
- psf = container_of(rp, struct ip_sf_socklist, rcu);
+ kfree(container_of(rp, struct ip_sf_socklist, rcu));
/* sk_omem_alloc should have been decreased by the caller*/
- kfree(psf);
}
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
struct in_device *in_dev)
{
- struct ip_sf_socklist *psf = iml->sflist;
+ struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
int err;
if (psf == NULL) {
@@ -1851,11 +1865,8 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
static void ip_mc_socklist_reclaim(struct rcu_head *rp)
{
- struct ip_mc_socklist *iml;
-
- iml = container_of(rp, struct ip_mc_socklist, rcu);
+ kfree(container_of(rp, struct ip_mc_socklist, rcu));
/* sk_omem_alloc should have been decreased by the caller*/
- kfree(iml);
}
@@ -1866,7 +1877,8 @@ static void ip_mc_socklist_reclaim(struct rcu_head *rp)
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
struct inet_sock *inet = inet_sk(sk);
- struct ip_mc_socklist *iml, **imlp;
+ struct ip_mc_socklist *iml;
+ struct ip_mc_socklist __rcu **imlp;
struct in_device *in_dev;
struct net *net = sock_net(sk);
__be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1888,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
ifindex = imr->imr_ifindex;
- for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
+ for (imlp = &inet->mc_list;
+ (iml = rtnl_dereference(*imlp)) != NULL;
+ imlp = &iml->next_rcu) {
if (iml->multi.imr_multiaddr.s_addr != group)
continue;
if (ifindex) {
@@ -1888,7 +1902,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
(void) ip_mc_leave_src(sk, iml, in_dev);
- rcu_assign_pointer(*imlp, iml->next);
+ *imlp = iml->next_rcu;
if (in_dev)
ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1948,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
}
err = -EADDRNOTAVAIL;
- for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rtnl(inet, pmc) {
if ((pmc->multi.imr_multiaddr.s_addr ==
imr.imr_multiaddr.s_addr) &&
(pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1972,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
pmc->sfmode = omode;
}
- psl = pmc->sflist;
+ psl = rtnl_dereference(pmc->sflist);
if (!add) {
if (!psl)
goto done; /* err = -EADDRNOTAVAIL */
@@ -2077,7 +2091,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
goto done;
}
- for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc->multi.imr_ifindex == imr.imr_ifindex)
break;
@@ -2107,7 +2121,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
msf->imsf_fmode, 0, NULL, 0);
}
- psl = pmc->sflist;
+ psl = rtnl_dereference(pmc->sflist);
if (psl) {
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2169,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
}
err = -EADDRNOTAVAIL;
- for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc->multi.imr_ifindex == imr.imr_ifindex)
break;
@@ -2163,7 +2177,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
if (!pmc) /* must have a prior join */
goto done;
msf->imsf_fmode = pmc->sfmode;
- psl = pmc->sflist;
+ psl = rtnl_dereference(pmc->sflist);
rtnl_unlock();
if (!psl) {
len = 0;
@@ -2208,7 +2222,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
err = -EADDRNOTAVAIL;
- for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+ for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == addr &&
pmc->multi.imr_ifindex == gsf->gf_interface)
break;
@@ -2216,7 +2230,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
if (!pmc) /* must have a prior join */
goto done;
gsf->gf_fmode = pmc->sfmode;
- psl = pmc->sflist;
+ psl = rtnl_dereference(pmc->sflist);
rtnl_unlock();
count = psl ? psl->sl_count : 0;
copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2271,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
goto out;
rcu_read_lock();
- for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
+ for_each_pmc_rcu(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
pmc->multi.imr_ifindex == dif)
break;
@@ -2265,7 +2279,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
ret = inet->mc_all;
if (!pmc)
goto unlock;
- psl = pmc->sflist;
+ psl = rcu_dereference(pmc->sflist);
ret = (pmc->sfmode == MCAST_EXCLUDE);
if (!psl)
goto unlock;
@@ -2300,10 +2314,10 @@ void ip_mc_drop_socket(struct sock *sk)
return;
rtnl_lock();
- while ((iml = inet->mc_list) != NULL) {
+ while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
struct in_device *in_dev;
- rcu_assign_pointer(inet->mc_list, iml->next);
+ inet->mc_list = iml->next_rcu;
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
if (in_dev != NULL)
@@ -2321,8 +2335,8 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
struct ip_sf_list *psf;
int rv = 0;
- read_lock(&in_dev->mc_list_lock);
- for (im=in_dev->mc_list; im; im=im->next) {
+ rcu_read_lock();
+ for_each_pmc_rcu(in_dev, im) {
if (im->multiaddr == mc_addr)
break;
}
@@ -2343,7 +2357,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
} else
rv = 1; /* unspecified source; tentatively allow */
}
- read_unlock(&in_dev->mc_list_lock);
+ rcu_read_unlock();
return rv;
}
@@ -2369,13 +2383,11 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
in_dev = __in_dev_get_rcu(state->dev);
if (!in_dev)
continue;
- read_lock(&in_dev->mc_list_lock);
- im = in_dev->mc_list;
+ im = rcu_dereference(in_dev->mc_list);
if (im) {
state->in_dev = in_dev;
break;
}
- read_unlock(&in_dev->mc_list_lock);
}
return im;
}
@@ -2383,11 +2395,9 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
- im = im->next;
- while (!im) {
- if (likely(state->in_dev != NULL))
- read_unlock(&state->in_dev->mc_list_lock);
+ im = rcu_dereference(im->next_rcu);
+ while (!im) {
state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->in_dev = NULL;
@@ -2396,8 +2406,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
state->in_dev = __in_dev_get_rcu(state->dev);
if (!state->in_dev)
continue;
- read_lock(&state->in_dev->mc_list_lock);
- im = state->in_dev->mc_list;
+ im = rcu_dereference(state->in_dev->mc_list);
}
return im;
}
@@ -2433,10 +2442,8 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
- if (likely(state->in_dev != NULL)) {
- read_unlock(&state->in_dev->mc_list_lock);
- state->in_dev = NULL;
- }
+
+ state->in_dev = NULL;
state->dev = NULL;
rcu_read_unlock();
}
@@ -2458,7 +2465,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
querier = "NONE";
#endif
- if (state->in_dev->mc_list == im) {
+ if (rcu_dereference(state->in_dev->mc_list) == im) {
seq_printf(seq, "%d\t%-10s: %5d %7s\n",
state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
}
@@ -2517,8 +2524,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
idev = __in_dev_get_rcu(state->dev);
if (unlikely(idev == NULL))
continue;
- read_lock(&idev->mc_list_lock);
- im = idev->mc_list;
+ im = rcu_dereference(idev->mc_list);
if (likely(im != NULL)) {
spin_lock_bh(&im->lock);
psf = im->sources;
@@ -2529,7 +2535,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
}
spin_unlock_bh(&im->lock);
}
- read_unlock(&idev->mc_list_lock);
}
return psf;
}
@@ -2543,9 +2548,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
spin_unlock_bh(&state->im->lock);
state->im = state->im->next;
while (!state->im) {
- if (likely(state->idev != NULL))
- read_unlock(&state->idev->mc_list_lock);
-
state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
@@ -2554,8 +2556,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
state->idev = __in_dev_get_rcu(state->dev);
if (!state->idev)
continue;
- read_lock(&state->idev->mc_list_lock);
- state->im = state->idev->mc_list;
+ state->im = rcu_dereference(state->idev->mc_list);
}
if (!state->im)
break;
@@ -2601,10 +2602,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
spin_unlock_bh(&state->im->lock);
state->im = NULL;
}
- if (likely(state->idev != NULL)) {
- read_unlock(&state->idev->mc_list_lock);
- state->idev = NULL;
- }
+ state->idev = NULL;
state->dev = NULL;
rcu_read_unlock();
}
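Among the igmp changes, igmpv3_newpack() now retries with progressively smaller buffers instead of failing outright under memory pressure, and stashes the size actually obtained in skb->cb so AVAILABLE() packs records to the real buffer rather than the device MTU. A user-space sketch of that back-off loop, with malloc standing in for alloc_skb():

#include <stdio.h>
#include <stdlib.h>

/* Halve the request until it succeeds or drops below a floor,
 * mirroring the alloc_skb() loop above (floor of 256, as in the hunk). */
static void *alloc_backoff(size_t *size)
{
    void *buf;

    while (1) {
        buf = malloc(*size);
        if (buf)
            return buf;
        *size >>= 1;
        if (*size < 256)
            return NULL;
    }
}

int main(void)
{
    size_t size = 65536;
    void *buf = alloc_backoff(&size);

    if (buf) {
        /* The caller must remember the size it really got. */
        printf("got %zu bytes\n", size);
        free(buf);
    }
    return 0;
}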
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7174370b119..25e318153f1 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -55,7 +55,6 @@ EXPORT_SYMBOL(inet_get_local_port_range);
int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb)
{
- const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
struct sock *sk2;
struct hlist_node *node;
int reuse = sk->sk_reuse;
@@ -75,9 +74,9 @@ int inet_csk_bind_conflict(const struct sock *sk,
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
- const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
- if (!sk2_rcv_saddr || !sk_rcv_saddr ||
- sk2_rcv_saddr == sk_rcv_saddr)
+ const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+ if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+ sk2_rcv_saddr == sk_rcv_saddr(sk))
break;
}
}
@@ -358,17 +357,14 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
struct ip_options *opt = inet_rsk(req)->opt;
struct flowi fl = { .oif = sk->sk_bound_dev_if,
.mark = sk->sk_mark,
- .nl_u = { .ip4_u =
- { .daddr = ((opt && opt->srr) ?
- opt->faddr :
- ireq->rmt_addr),
- .saddr = ireq->loc_addr,
- .tos = RT_CONN_FLAGS(sk) } },
+ .fl4_dst = ((opt && opt->srr) ?
+ opt->faddr : ireq->rmt_addr),
+ .fl4_src = ireq->loc_addr,
+ .fl4_tos = RT_CONN_FLAGS(sk),
.proto = sk->sk_protocol,
.flags = inet_sk_flowi_flags(sk),
- .uli_u = { .ports =
- { .sport = inet_sk(sk)->inet_sport,
- .dport = ireq->rmt_port } } };
+ .fl_ip_sport = inet_sk(sk)->inet_sport,
+ .fl_ip_dport = ireq->rmt_port };
struct net *net = sock_net(sk);
security_req_classify_flow(req, &fl);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9e94d7cf4f8..d9bc85751c7 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -63,7 +63,7 @@
* refcnt: atomically against modifications on other CPU;
* usually under some other lock to prevent node disappearing
* dtime: unused node list lock
- * v4daddr: unchangeable
+ * daddr: unchangeable
* ip_id_count: atomic value (no lock needed)
*/
@@ -79,15 +79,24 @@ static const struct inet_peer peer_fake_node = {
.avl_height = 0
};
-static struct {
+struct inet_peer_base {
struct inet_peer __rcu *root;
spinlock_t lock;
int total;
-} peers = {
+};
+
+static struct inet_peer_base v4_peers = {
+ .root = peer_avl_empty_rcu,
+ .lock = __SPIN_LOCK_UNLOCKED(v4_peers.lock),
+ .total = 0,
+};
+
+static struct inet_peer_base v6_peers = {
.root = peer_avl_empty_rcu,
- .lock = __SPIN_LOCK_UNLOCKED(peers.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(v6_peers.lock),
.total = 0,
};
+
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
/* Exported for sysctl_net_ipv4. */
@@ -152,28 +161,45 @@ static void unlink_from_unused(struct inet_peer *p)
}
}
+static int addr_compare(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
+{
+ int i, n = (a->family == AF_INET ? 1 : 4);
+
+ for (i = 0; i < n; i++) {
+ if (a->a6[i] == b->a6[i])
+ continue;
+ if (a->a6[i] < b->a6[i])
+ return -1;
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* Called with local BH disabled and the pool lock held.
*/
-#define lookup(_daddr, _stack) \
+#define lookup(_daddr, _stack, _base) \
({ \
struct inet_peer *u; \
struct inet_peer __rcu **v; \
\
stackptr = _stack; \
- *stackptr++ = &peers.root; \
- for (u = rcu_dereference_protected(peers.root, \
- lockdep_is_held(&peers.lock)); \
+ *stackptr++ = &_base->root; \
+ for (u = rcu_dereference_protected(_base->root, \
+ lockdep_is_held(&_base->lock)); \
u != peer_avl_empty; ) { \
- if (_daddr == u->v4daddr) \
+ int cmp = addr_compare(_daddr, &u->daddr); \
+ if (cmp == 0) \
break; \
- if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
+ if (cmp == -1) \
v = &u->avl_left; \
else \
v = &u->avl_right; \
*stackptr++ = v; \
u = rcu_dereference_protected(*v, \
- lockdep_is_held(&peers.lock)); \
+ lockdep_is_held(&_base->lock)); \
} \
u; \
})
@@ -185,13 +211,15 @@ static void unlink_from_unused(struct inet_peer *p)
* But every pointer we follow is guaranteed to be valid thanks to RCU.
* We exit from this function if number of links exceeds PEER_MAXDEPTH
*/
-static struct inet_peer *lookup_rcu_bh(__be32 daddr)
+static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
+ struct inet_peer_base *base)
{
- struct inet_peer *u = rcu_dereference_bh(peers.root);
+ struct inet_peer *u = rcu_dereference_bh(base->root);
int count = 0;
while (u != peer_avl_empty) {
- if (daddr == u->v4daddr) {
+ int cmp = addr_compare(daddr, &u->daddr);
+ if (cmp == 0) {
/* Before taking a reference, check if this entry was
* deleted, unlink_from_pool() sets refcnt=-1 to make
* distinction between an unused entry (refcnt=0) and
@@ -201,7 +229,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
u = NULL;
return u;
}
- if ((__force __u32)daddr < (__force __u32)u->v4daddr)
+ if (cmp == -1)
u = rcu_dereference_bh(u->avl_left);
else
u = rcu_dereference_bh(u->avl_right);
@@ -212,19 +240,19 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
}
/* Called with local BH disabled and the pool lock held. */
-#define lookup_rightempty(start) \
+#define lookup_rightempty(start, base) \
({ \
struct inet_peer *u; \
struct inet_peer __rcu **v; \
*stackptr++ = &start->avl_left; \
v = &start->avl_left; \
for (u = rcu_dereference_protected(*v, \
- lockdep_is_held(&peers.lock)); \
+ lockdep_is_held(&base->lock)); \
u->avl_right != peer_avl_empty_rcu; ) { \
v = &u->avl_right; \
*stackptr++ = v; \
u = rcu_dereference_protected(*v, \
- lockdep_is_held(&peers.lock)); \
+ lockdep_is_held(&base->lock)); \
} \
u; \
})
@@ -234,7 +262,8 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
* Look into mm/map_avl.c for more detail description of the ideas.
*/
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
- struct inet_peer __rcu ***stackend)
+ struct inet_peer __rcu ***stackend,
+ struct inet_peer_base *base)
{
struct inet_peer __rcu **nodep;
struct inet_peer *node, *l, *r;
@@ -243,20 +272,20 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
while (stackend > stack) {
nodep = *--stackend;
node = rcu_dereference_protected(*nodep,
- lockdep_is_held(&peers.lock));
+ lockdep_is_held(&base->lock));
l = rcu_dereference_protected(node->avl_left,
- lockdep_is_held(&peers.lock));
+ lockdep_is_held(&base->lock));
r = rcu_dereference_protected(node->avl_right,
- lockdep_is_held(&peers.lock));
+ lockdep_is_held(&base->lock));
lh = node_height(l);
rh = node_height(r);
if (lh > rh + 1) { /* l: RH+2 */
struct inet_peer *ll, *lr, *lrl, *lrr;
int lrh;
ll = rcu_dereference_protected(l->avl_left,
- lockdep_is_held(&peers.lock));
+ lockdep_is_held(&base->lock));
lr = rcu_dereference_protected(l->avl_right,
- lockdep_is_held(&peers.lock));
+ lockdep_is_held(&base->lock));
lrh = node_height(lr);
if (lrh <= node_height(ll)) { /* ll: RH+1 */
RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
@@ -268,9 +297,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
RCU_INIT_POINTER(*nodep, l);
} else { /* ll: RH, lr: RH+1 */
lrl = rcu_dereference_protected(lr->avl_left,
- lockdep_is_held(&peers.lock)); /* lrl: RH or RH-1 */
+ lockdep_is_held(&base->lock)); /* lrl: RH or RH-1 */
lrr = rcu_dereference_protected(lr->avl_right,
- lockdep_is_held(&peers.lock)); /* lrr: RH or RH-1 */
+ lockdep_is_held(&base->lock)); /* lrr: RH or RH-1 */
RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = rh + 1; /* node: RH+1 */
@@ -286,9 +315,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
struct inet_peer *rr, *rl, *rlr, *rll;
int rlh;
rr = rcu_dereference_protected(r->avl_right,
- lockdep_is_held(&peers.lock));
+ lockdep_is_held(&base->lock));
rl = rcu_dereference_protected(r->avl_left,
- lockdep_is_held(&peers.lock));
+ lockdep_is_held(&base->lock));
rlh = node_height(rl);
if (rlh <= node_height(rr)) { /* rr: LH+1 */
RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
@@ -300,9 +329,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
RCU_INIT_POINTER(*nodep, r);
} else { /* rr: RH, rl: RH+1 */
rlr = rcu_dereference_protected(rl->avl_right,
- lockdep_is_held(&peers.lock)); /* rlr: LH or LH-1 */
+ lockdep_is_held(&base->lock)); /* rlr: LH or LH-1 */
rll = rcu_dereference_protected(rl->avl_left,
- lockdep_is_held(&peers.lock)); /* rll: LH or LH-1 */
+ lockdep_is_held(&base->lock)); /* rll: LH or LH-1 */
RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = lh + 1; /* node: LH+1 */
@@ -321,14 +350,14 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
}
/* Called with local BH disabled and the pool lock held. */
-#define link_to_pool(n) \
+#define link_to_pool(n, base) \
do { \
n->avl_height = 1; \
n->avl_left = peer_avl_empty_rcu; \
n->avl_right = peer_avl_empty_rcu; \
/* lockless readers can catch us now */ \
rcu_assign_pointer(**--stackptr, n); \
- peer_avl_rebalance(stack, stackptr); \
+ peer_avl_rebalance(stack, stackptr, base); \
} while (0)
static void inetpeer_free_rcu(struct rcu_head *head)
@@ -337,13 +366,13 @@ static void inetpeer_free_rcu(struct rcu_head *head)
}
/* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
{
int do_free;
do_free = 0;
- spin_lock_bh(&peers.lock);
+ spin_lock_bh(&base->lock);
/* Check the reference counter. It was artificially incremented by 1
* in cleanup() function to prevent sudden disappearing. If we can
* atomically (because of lockless readers) take this last reference,
@@ -353,7 +382,7 @@ static void unlink_from_pool(struct inet_peer *p)
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
struct inet_peer __rcu **stack[PEER_MAXDEPTH];
struct inet_peer __rcu ***stackptr, ***delp;
- if (lookup(p->v4daddr, stack) != p)
+ if (lookup(&p->daddr, stack, base) != p)
BUG();
delp = stackptr - 1; /* *delp[0] == p */
if (p->avl_left == peer_avl_empty_rcu) {
@@ -362,11 +391,11 @@ static void unlink_from_pool(struct inet_peer *p)
} else {
/* look for a node to insert instead of p */
struct inet_peer *t;
- t = lookup_rightempty(p);
+ t = lookup_rightempty(p, base);
BUG_ON(rcu_dereference_protected(*stackptr[-1],
- lockdep_is_held(&peers.lock)) != t);
+ lockdep_is_held(&base->lock)) != t);
**--stackptr = t->avl_left;
- /* t is removed, t->v4daddr > x->v4daddr for any
+ /* t is removed, t->daddr > x->daddr for any
* x in p->avl_left subtree.
* Put t in the old place of p. */
RCU_INIT_POINTER(*delp[0], t);
@@ -376,11 +405,11 @@ static void unlink_from_pool(struct inet_peer *p)
BUG_ON(delp[1] != &p->avl_left);
delp[1] = &t->avl_left; /* was &p->avl_left */
}
- peer_avl_rebalance(stack, stackptr);
- peers.total--;
+ peer_avl_rebalance(stack, stackptr, base);
+ base->total--;
do_free = 1;
}
- spin_unlock_bh(&peers.lock);
+ spin_unlock_bh(&base->lock);
if (do_free)
call_rcu_bh(&p->rcu, inetpeer_free_rcu);
@@ -395,6 +424,16 @@ static void unlink_from_pool(struct inet_peer *p)
inet_putpeer(p);
}
+static struct inet_peer_base *family_to_base(int family)
+{
+ return (family == AF_INET ? &v4_peers : &v6_peers);
+}
+
+static struct inet_peer_base *peer_to_base(struct inet_peer *p)
+{
+ return family_to_base(p->daddr.family);
+}
+
/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
@@ -428,21 +467,22 @@ static int cleanup_once(unsigned long ttl)
* happen because of entry limits in route cache. */
return -1;
- unlink_from_pool(p);
+ unlink_from_pool(p, peer_to_base(p));
return 0;
}
/* Called with or without local BH being disabled. */
-struct inet_peer *inet_getpeer(__be32 daddr, int create)
+struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
{
- struct inet_peer *p;
struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
+ struct inet_peer_base *base = family_to_base(AF_INET);
+ struct inet_peer *p;
/* Look up for the address quickly, lockless.
* Because of a concurrent writer, we might not find an existing entry.
*/
rcu_read_lock_bh();
- p = lookup_rcu_bh(daddr);
+ p = lookup_rcu_bh(daddr, base);
rcu_read_unlock_bh();
if (p) {
@@ -456,50 +496,57 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
/* retry an exact lookup, taking the lock before.
* At least, nodes should be hot in our cache.
*/
- spin_lock_bh(&peers.lock);
- p = lookup(daddr, stack);
+ spin_lock_bh(&base->lock);
+ p = lookup(daddr, stack, base);
if (p != peer_avl_empty) {
atomic_inc(&p->refcnt);
- spin_unlock_bh(&peers.lock);
+ spin_unlock_bh(&base->lock);
/* Remove the entry from unused list if it was there. */
unlink_from_unused(p);
return p;
}
p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
if (p) {
- p->v4daddr = daddr;
+ p->daddr = *daddr;
atomic_set(&p->refcnt, 1);
atomic_set(&p->rid, 0);
- atomic_set(&p->ip_id_count, secure_ip_id(daddr));
+ atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
p->tcp_ts_stamp = 0;
INIT_LIST_HEAD(&p->unused);
/* Link the node. */
- link_to_pool(p);
- peers.total++;
+ link_to_pool(p, base);
+ base->total++;
}
- spin_unlock_bh(&peers.lock);
+ spin_unlock_bh(&base->lock);
- if (peers.total >= inet_peer_threshold)
+ if (base->total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
cleanup_once(0);
return p;
}
+static int compute_total(void)
+{
+ return v4_peers.total + v6_peers.total;
+}
+EXPORT_SYMBOL_GPL(inet_getpeer);
+
/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
unsigned long now = jiffies;
- int ttl;
+ int ttl, total;
- if (peers.total >= inet_peer_threshold)
+ total = compute_total();
+ if (total >= inet_peer_threshold)
ttl = inet_peer_minttl;
else
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
- peers.total / inet_peer_threshold * HZ;
+ total / inet_peer_threshold * HZ;
while (!cleanup_once(ttl)) {
if (jiffies != now)
break;
@@ -508,13 +555,14 @@ static void peer_check_expire(unsigned long dummy)
/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
* interval depending on the total number of entries (more entries,
* less interval). */
- if (peers.total >= inet_peer_threshold)
+ total = compute_total();
+ if (total >= inet_peer_threshold)
peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
else
peer_periodic_timer.expires = jiffies
+ inet_peer_gc_maxtime
- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
- peers.total / inet_peer_threshold * HZ;
+ total / inet_peer_threshold * HZ;
add_timer(&peer_periodic_timer);
}
@@ -530,3 +578,4 @@ void inet_putpeer(struct inet_peer *p)
local_bh_enable();
}
+EXPORT_SYMBOL_GPL(inet_putpeer);
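The new addr_compare() gives the per-family AVL trees a single total order: one 32-bit word for IPv4 keys, four for IPv6, compared most-significant-word first. A standalone replica with plain arrays (the kernel's a6[] words are __be32, so comparing them in host order changes the numeric ordering of addresses but not the consistency the tree needs):

#include <stdio.h>

/* n = 1 word for IPv4 keys, 4 words for IPv6 keys */
static int addr_compare(const unsigned int *a, const unsigned int *b, int n)
{
    for (int i = 0; i < n; i++) {
        if (a[i] == b[i])
            continue;
        return a[i] < b[i] ? -1 : 1;
    }
    return 0;                           /* equal: key found in the tree */
}

int main(void)
{
    unsigned int x[4] = { 0x20010db8, 0, 0, 1 };
    unsigned int y[4] = { 0x20010db8, 0, 0, 2 };

    printf("%d\n", addr_compare(x, y, 4));  /* -1: x sorts left of y */
    printf("%d\n", addr_compare(x, x, 4));  /*  0: same key */
    return 0;
}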
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 168440834ad..a1151b8adf3 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -45,6 +45,7 @@
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
+#include <net/inet_ecn.h>
/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
* code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -70,11 +71,28 @@ struct ipq {
__be32 daddr;
__be16 id;
u8 protocol;
+ u8 ecn; /* RFC3168 support */
int iif;
unsigned int rid;
struct inet_peer *peer;
};
+#define IPFRAG_ECN_CLEAR 0x01 /* one frag had INET_ECN_NOT_ECT */
+#define IPFRAG_ECN_SET_CE 0x04 /* one frag had INET_ECN_CE */
+
+static inline u8 ip4_frag_ecn(u8 tos)
+{
+ tos = (tos & INET_ECN_MASK) + 1;
+ /*
+ * After the last operation we have (in binary):
+ * INET_ECN_NOT_ECT => 001
+ * INET_ECN_ECT_1 => 010
+ * INET_ECN_ECT_0 => 011
+ * INET_ECN_CE => 100
+ */
+ return (tos & 2) ? 0 : tos;
+}
+
static struct inet_frags ip4_frags;
int ip_frag_nqueues(struct net *net)
@@ -137,11 +155,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
qp->protocol = arg->iph->protocol;
qp->id = arg->iph->id;
+ qp->ecn = ip4_frag_ecn(arg->iph->tos);
qp->saddr = arg->iph->saddr;
qp->daddr = arg->iph->daddr;
qp->user = arg->user;
qp->peer = sysctl_ipfrag_max_dist ?
- inet_getpeer(arg->iph->saddr, 1) : NULL;
+ inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
}
static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
@@ -316,6 +335,7 @@ static int ip_frag_reinit(struct ipq *qp)
qp->q.fragments = NULL;
qp->q.fragments_tail = NULL;
qp->iif = 0;
+ qp->ecn = 0;
return 0;
}
@@ -328,6 +348,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
int flags, offset;
int ihl, end;
int err = -ENOENT;
+ u8 ecn;
if (qp->q.last_in & INET_FRAG_COMPLETE)
goto err;
@@ -339,6 +360,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
goto err;
}
+ ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
offset = ntohs(ip_hdr(skb)->frag_off);
flags = offset & ~IP_OFFSET;
offset &= IP_OFFSET;
@@ -472,6 +494,7 @@ found:
}
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
+ qp->ecn |= ecn;
atomic_add(skb->truesize, &qp->q.net->mem);
if (offset == 0)
qp->q.last_in |= INET_FRAG_FIRST_IN;
@@ -583,6 +606,17 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
iph = ip_hdr(head);
iph->frag_off = 0;
iph->tot_len = htons(len);
+ /* RFC3168 5.3 Fragmentation support
+ * If one fragment had INET_ECN_NOT_ECT,
+ * reassembled frame also has INET_ECN_NOT_ECT
+ * Elif one fragment had INET_ECN_CE
+ * reassembled frame also has INET_ECN_CE
+ */
+ if (qp->ecn & IPFRAG_ECN_CLEAR)
+ iph->tos &= ~INET_ECN_MASK;
+ else if (qp->ecn & IPFRAG_ECN_SET_CE)
+ iph->tos |= INET_ECN_CE;
+
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
qp->q.fragments_tail = NULL;
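The ip4_frag_ecn() helper above folds the two ECN bits of the TOS byte into a flag byte that the queue can OR together across fragments, so reassembly can apply RFC 3168 5.3: any Not-ECT fragment clears ECN on the whole frame, otherwise any CE fragment marks it. The full mapping, computed by the same expression:

#include <stdio.h>

#define INET_ECN_MASK 3         /* low two bits of tos carry ECN */
#define IPFRAG_ECN_CLEAR 0x01   /* some fragment was Not-ECT */
#define IPFRAG_ECN_SET_CE 0x04  /* some fragment was CE */

static unsigned char ip4_frag_ecn(unsigned char tos)
{
    tos = (tos & INET_ECN_MASK) + 1;
    /* Not-ECT -> 001, ECT(1) -> 010, ECT(0) -> 011, CE -> 100;
     * the ECT values (bit 1 set) collapse to 0: they need no action. */
    return (tos & 2) ? 0 : tos;
}

int main(void)
{
    const char *name[] = { "Not-ECT", "ECT(1)", "ECT(0)", "CE" };

    for (unsigned char ecn = 0; ecn < 4; ecn++)
        printf("%-7s -> 0x%02x\n", name[ecn], ip4_frag_ecn(ecn));
    /* Not-ECT -> 0x01 (CLEAR), ECT(1)/ECT(0) -> 0x00, CE -> 0x04 (SET_CE) */
    return 0;
}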
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 70ff77f02ee..eb68a0e34e4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -405,11 +405,11 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
else
- sprintf(name, "gre%%d");
+ strcpy(name, "gre%d");
dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
if (!dev)
- return NULL;
+ return NULL;
dev_net_set(dev, net);
@@ -634,7 +634,7 @@ static int ipgre_rcv(struct sk_buff *skb)
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
/* Looped back packet, drop it! */
- if (skb_rtable(skb)->fl.iif == 0)
+ if (rt_is_output_route(skb_rtable(skb)))
goto drop;
tunnel->dev->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
@@ -772,16 +772,11 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
{
struct flowi fl = {
.oif = tunnel->parms.link,
- .nl_u = {
- .ip4_u = {
- .daddr = dst,
- .saddr = tiph->saddr,
- .tos = RT_TOS(tos)
- }
- },
- .proto = IPPROTO_GRE
- }
-;
+ .fl4_dst = dst,
+ .fl4_src = tiph->saddr,
+ .fl4_tos = RT_TOS(tos),
+ .fl_gre_key = tunnel->parms.o_key
+ };
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
dev->stats.tx_carrier_errors++;
goto tx_error;
@@ -823,7 +818,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
!ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
rt6->rt6i_dst.plen == 128) {
rt6->rt6i_flags |= RTF_MODIFIED;
- skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
+ dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
}
}
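dst_metric_set() replaces the open-coded `metrics[RTAX_MTU-1]` stores seen throughout this merge. A minimal sketch of such an accessor, assuming the metrics still live in a flat u32 array inside the dst_entry (renamed here to `_metrics` to discourage direct indexing; that name is an assumption):

	/* sketch only: write one routing metric through an accessor
	 * instead of indexing the array directly (RTAX_* are 1-based)
	 */
	static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
	{
		dst->_metrics[metric - 1] = val;
	}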
@@ -895,7 +890,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
else
- iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
}
((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
@@ -951,14 +946,11 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
if (iph->daddr) {
struct flowi fl = {
.oif = tunnel->parms.link,
- .nl_u = {
- .ip4_u = {
- .daddr = iph->daddr,
- .saddr = iph->saddr,
- .tos = RT_TOS(iph->tos)
- }
- },
- .proto = IPPROTO_GRE
+ .fl4_dst = iph->daddr,
+ .fl4_src = iph->saddr,
+ .fl4_tos = RT_TOS(iph->tos),
+ .proto = IPPROTO_GRE,
+ .fl_gre_key = tunnel->parms.o_key
};
struct rtable *rt;
@@ -1216,14 +1208,11 @@ static int ipgre_open(struct net_device *dev)
if (ipv4_is_multicast(t->parms.iph.daddr)) {
struct flowi fl = {
.oif = t->parms.link,
- .nl_u = {
- .ip4_u = {
- .daddr = t->parms.iph.daddr,
- .saddr = t->parms.iph.saddr,
- .tos = RT_TOS(t->parms.iph.tos)
- }
- },
- .proto = IPPROTO_GRE
+ .fl4_dst = t->parms.iph.daddr,
+ .fl4_src = t->parms.iph.saddr,
+ .fl4_tos = RT_TOS(t->parms.iph.tos),
+ .proto = IPPROTO_GRE,
+ .fl_gre_key = t->parms.o_key
};
struct rtable *rt;
@@ -1775,3 +1764,4 @@ module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
+MODULE_ALIAS("gre0");
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 439d2a34ee4..04c7b3ba6b3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -82,6 +82,7 @@
#include <linux/tcp.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
+EXPORT_SYMBOL(sysctl_ip_default_ttl);
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
@@ -130,7 +131,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
int ttl = inet->uc_ttl;
if (ttl < 0)
- ttl = dst_metric(dst, RTAX_HOPLIMIT);
+ ttl = ip4_dst_hoplimit(dst);
return ttl;
}
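Both this hunk and the ip_gre.c/ipt_REJECT.c changes swap dst_metric(dst, RTAX_HOPLIMIT) for ip4_dst_hoplimit(). The export of sysctl_ip_default_ttl just above suggests the helper falls back to the sysctl when the route carries no explicit hop-limit metric, roughly:

	/* sketch: read the hop-limit metric, defaulting to the (now
	 * exported) sysctl_ip_default_ttl when it is unset
	 */
	static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
	{
		int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);

		if (hoplimit == 0)
			hoplimit = sysctl_ip_default_ttl;
		return hoplimit;
	}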
@@ -341,15 +342,13 @@ int ip_queue_xmit(struct sk_buff *skb)
{
struct flowi fl = { .oif = sk->sk_bound_dev_if,
.mark = sk->sk_mark,
- .nl_u = { .ip4_u =
- { .daddr = daddr,
- .saddr = inet->inet_saddr,
- .tos = RT_CONN_FLAGS(sk) } },
+ .fl4_dst = daddr,
+ .fl4_src = inet->inet_saddr,
+ .fl4_tos = RT_CONN_FLAGS(sk),
.proto = sk->sk_protocol,
.flags = inet_sk_flowi_flags(sk),
- .uli_u = { .ports =
- { .sport = inet->inet_sport,
- .dport = inet->inet_dport } } };
+ .fl_ip_sport = inet->inet_sport,
+ .fl_ip_dport = inet->inet_dport };
/* If this fails, retransmit mechanism of transport layer will
* keep trying until route appears or the connection times
@@ -1404,14 +1403,11 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
{
struct flowi fl = { .oif = arg->bound_dev_if,
- .nl_u = { .ip4_u =
- { .daddr = daddr,
- .saddr = rt->rt_spec_dst,
- .tos = RT_TOS(ip_hdr(skb)->tos) } },
- /* Not quite clean, but right. */
- .uli_u = { .ports =
- { .sport = tcp_hdr(skb)->dest,
- .dport = tcp_hdr(skb)->source } },
+ .fl4_dst = daddr,
+ .fl4_src = rt->rt_spec_dst,
+ .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
+ .fl_ip_sport = tcp_hdr(skb)->dest,
+ .fl_ip_dport = tcp_hdr(skb)->source,
.proto = sk->sk_protocol,
.flags = ip_reply_arg_flowi_flags(arg) };
security_skb_classify_flow(skb, &fl);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 3a6e1ec5e9a..2b097752426 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1191,13 +1191,13 @@ static int __init ic_dynamic(void)
(ic_proto_enabled & IC_USE_DHCP) &&
ic_dhcp_msgtype != DHCPACK) {
ic_got_reply = 0;
- printk(",");
+ printk(KERN_CONT ",");
continue;
}
#endif /* IPCONFIG_DHCP */
if (ic_got_reply) {
- printk(" OK\n");
+ printk(KERN_CONT " OK\n");
break;
}
@@ -1205,7 +1205,7 @@ static int __init ic_dynamic(void)
continue;
if (! --retries) {
- printk(" timed out!\n");
+ printk(KERN_CONT " timed out!\n");
break;
}
@@ -1215,7 +1215,7 @@ static int __init ic_dynamic(void)
if (timeout > CONF_TIMEOUT_MAX)
timeout = CONF_TIMEOUT_MAX;
- printk(".");
+ printk(KERN_CONT ".");
}
#ifdef IPCONFIG_BOOTP
@@ -1236,7 +1236,7 @@ static int __init ic_dynamic(void)
((ic_got_reply & IC_RARP) ? "RARP"
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
&ic_servaddr);
- printk("my address is %pI4\n", &ic_myaddr);
+ printk(KERN_CONT "my address is %pI4\n", &ic_myaddr);
return 0;
}
@@ -1468,19 +1468,19 @@ static int __init ip_auto_config(void)
/*
* Clue in the operator.
*/
- printk("IP-Config: Complete:");
- printk("\n device=%s", ic_dev->name);
- printk(", addr=%pI4", &ic_myaddr);
- printk(", mask=%pI4", &ic_netmask);
- printk(", gw=%pI4", &ic_gateway);
- printk(",\n host=%s, domain=%s, nis-domain=%s",
+ printk("IP-Config: Complete:\n");
+ printk(" device=%s", ic_dev->name);
+ printk(KERN_CONT ", addr=%pI4", &ic_myaddr);
+ printk(KERN_CONT ", mask=%pI4", &ic_netmask);
+ printk(KERN_CONT ", gw=%pI4", &ic_gateway);
+ printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s",
utsname()->nodename, ic_domain, utsname()->domainname);
- printk(",\n bootserver=%pI4", &ic_servaddr);
- printk(", rootserver=%pI4", &root_server_addr);
- printk(", rootpath=%s", root_server_path);
+ printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr);
+ printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
+ printk(KERN_CONT ", rootpath=%s", root_server_path);
if (ic_dev_mtu)
- printk(", mtu=%d", ic_dev_mtu);
- printk("\n");
+ printk(KERN_CONT ", mtu=%d", ic_dev_mtu);
+ printk(KERN_CONT "\n");
#endif /* !SILENT */
return 0;
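Each printk() without an explicit level starts a new log record at the default loglevel, so multi-part console lines like the ones above must mark their continuations. An illustrative pattern (hypothetical message text):

	printk(KERN_INFO "IP-Config: probing");	/* opens the log line */
	printk(KERN_CONT ".");			/* appended to the same line */
	printk(KERN_CONT " OK\n");		/* '\n' finally closes it */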
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index cd300aaee78..988f52fba54 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -463,13 +463,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct flowi fl = {
.oif = tunnel->parms.link,
- .nl_u = {
- .ip4_u = {
- .daddr = dst,
- .saddr = tiph->saddr,
- .tos = RT_TOS(tos)
- }
- },
+ .fl4_dst = dst,
+ .fl4_src = tiph->saddr,
+ .fl4_tos = RT_TOS(tos),
.proto = IPPROTO_IPIP
};
@@ -589,13 +585,9 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
if (iph->daddr) {
struct flowi fl = {
.oif = tunnel->parms.link,
- .nl_u = {
- .ip4_u = {
- .daddr = iph->daddr,
- .saddr = iph->saddr,
- .tos = RT_TOS(iph->tos)
- }
- },
+ .fl4_dst = iph->daddr,
+ .fl4_src = iph->saddr,
+ .fl4_tos = RT_TOS(iph->tos),
.proto = IPPROTO_IPIP
};
struct rtable *rt;
@@ -921,3 +913,4 @@ static void __exit ipip_fini(void)
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("tunl0");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 86dd5691af4..3f3a9afd73e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1537,13 +1537,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
if (vif->flags & VIFF_TUNNEL) {
struct flowi fl = {
.oif = vif->link,
- .nl_u = {
- .ip4_u = {
- .daddr = vif->remote,
- .saddr = vif->local,
- .tos = RT_TOS(iph->tos)
- }
- },
+ .fl4_dst = vif->remote,
+ .fl4_src = vif->local,
+ .fl4_tos = RT_TOS(iph->tos),
.proto = IPPROTO_IPIP
};
@@ -1553,12 +1549,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
} else {
struct flowi fl = {
.oif = vif->link,
- .nl_u = {
- .ip4_u = {
- .daddr = iph->daddr,
- .tos = RT_TOS(iph->tos)
- }
- },
+ .fl4_dst = iph->daddr,
+ .fl4_tos = RT_TOS(iph->tos),
.proto = IPPROTO_IPIP
};
@@ -1654,7 +1646,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
if (mrt->vif_table[vif].dev != skb->dev) {
int true_vifi;
- if (skb_rtable(skb)->fl.iif == 0) {
+ if (rt_is_output_route(skb_rtable(skb))) {
/* It is our own packet, looped back.
* Very complicated situation...
*
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index d88a46c54fd..994a1f29ebb 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -31,10 +31,10 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
* packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
*/
if (addr_type == RTN_LOCAL) {
- fl.nl_u.ip4_u.daddr = iph->daddr;
+ fl.fl4_dst = iph->daddr;
if (type == RTN_LOCAL)
- fl.nl_u.ip4_u.saddr = iph->saddr;
- fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
+ fl.fl4_src = iph->saddr;
+ fl.fl4_tos = RT_TOS(iph->tos);
fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
fl.mark = skb->mark;
fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
@@ -47,7 +47,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
} else {
/* non-local src, find valid iif to satisfy
* rp-filter when calling ip_route_input. */
- fl.nl_u.ip4_u.daddr = iph->saddr;
+ fl.fl4_dst = iph->saddr;
if (ip_route_output_key(net, &rt, &fl) != 0)
return -1;
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 48111594ee9..19eb59d0103 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -3,15 +3,15 @@
#
# objects for l3 independent conntrack
-nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
+nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
ifeq ($(CONFIG_PROC_FS),y)
nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
endif
endif
-nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
-iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o
+nf_nat-y := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
+iptable_nat-y := nf_nat_rule.o nf_nat_standalone.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 43eec80c0e7..1ff79e557f9 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -116,7 +116,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
if (ip_route_me_harder(nskb, addr_type))
goto free_nskb;
- niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
+ niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
/* "Never happens" */
if (nskb->len > dst_mtu(skb_dst(nskb)))
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 37f8adb68c7..63f60fc5d26 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -97,7 +97,7 @@ static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
- return ret;
+ return 0;
ret = seq_printf(s, "secctx=%s ", secctx);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 1f85ef28989..a3d5ab786e8 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -549,10 +549,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
{
struct flowi fl = { .oif = ipc.oif,
.mark = sk->sk_mark,
- .nl_u = { .ip4_u =
- { .daddr = daddr,
- .saddr = saddr,
- .tos = tos } },
+ .fl4_dst = daddr,
+ .fl4_src = saddr,
+ .fl4_tos = tos,
.proto = inet->hdrincl ? IPPROTO_RAW :
sk->sk_protocol,
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 93bfd95584f..351dc4e8524 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -139,20 +139,26 @@ static unsigned long expires_ljiffies;
*/
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
+static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
+static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
static void ipv4_dst_destroy(struct dst_entry *dst);
-static void ipv4_dst_ifdown(struct dst_entry *dst,
- struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
+static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ int how)
+{
+}
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.gc = rt_garbage_collect,
.check = ipv4_dst_check,
+ .default_advmss = ipv4_default_advmss,
+ .default_mtu = ipv4_default_mtu,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
.negative_advice = ipv4_negative_advice,
@@ -381,8 +387,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
(__force u32)r->rt_gateway,
r->rt_flags, atomic_read(&r->dst.__refcnt),
r->dst.__use, 0, (__force u32)r->rt_src,
- (dst_metric(&r->dst, RTAX_ADVMSS) ?
- (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
+ dst_metric_advmss(&r->dst) + 40,
dst_metric(&r->dst, RTAX_WINDOW),
(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
dst_metric(&r->dst, RTAX_RTTVAR)),
@@ -621,7 +626,7 @@ static inline int rt_fast_clean(struct rtable *rth)
/* Kill broadcast/multicast entries very aggressively, if they
collide in hash table with more useful entries */
return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
- rth->fl.iif && rth->dst.rt_next;
+ rt_is_input_route(rth) && rth->dst.rt_next;
}
static inline int rt_valuable(struct rtable *rth)
@@ -666,7 +671,7 @@ static inline u32 rt_score(struct rtable *rt)
if (rt_valuable(rt))
score |= (1<<31);
- if (!rt->fl.iif ||
+ if (rt_is_output_route(rt) ||
!(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
score |= (1<<30);
@@ -682,17 +687,17 @@ static inline bool rt_caching(const struct net *net)
static inline bool compare_hash_inputs(const struct flowi *fl1,
const struct flowi *fl2)
{
- return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
- ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
+ return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
+ ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
(fl1->iif ^ fl2->iif)) == 0);
}
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
- return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
- ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
+ return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
+ ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
(fl1->mark ^ fl2->mark) |
- (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
+ (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
(fl1->oif ^ fl2->oif) |
(fl1->iif ^ fl2->iif)) == 0;
}
@@ -712,13 +717,15 @@ static inline int rt_is_expired(struct rtable *rth)
* Can be called by a softirq or a process.
* In the latter case, we want to reschedule if necessary
*/
-static void rt_do_flush(int process_context)
+static void rt_do_flush(struct net *net, int process_context)
{
unsigned int i;
struct rtable *rth, *next;
- struct rtable * tail;
for (i = 0; i <= rt_hash_mask; i++) {
+ struct rtable __rcu **pprev;
+ struct rtable *list;
+
if (process_context && need_resched())
cond_resched();
rth = rcu_dereference_raw(rt_hash_table[i].chain);
@@ -726,50 +733,32 @@ static void rt_do_flush(int process_context)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
-#ifdef CONFIG_NET_NS
- {
- struct rtable __rcu **prev;
- struct rtable *p;
- rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ list = NULL;
+ pprev = &rt_hash_table[i].chain;
+ rth = rcu_dereference_protected(*pprev,
lockdep_is_held(rt_hash_lock_addr(i)));
- /* defer releasing the head of the list after spin_unlock */
- for (tail = rth; tail;
- tail = rcu_dereference_protected(tail->dst.rt_next,
- lockdep_is_held(rt_hash_lock_addr(i))))
- if (!rt_is_expired(tail))
- break;
- if (rth != tail)
- rt_hash_table[i].chain = tail;
-
- /* call rt_free on entries after the tail requiring flush */
- prev = &rt_hash_table[i].chain;
- for (p = rcu_dereference_protected(*prev,
- lockdep_is_held(rt_hash_lock_addr(i)));
- p != NULL;
- p = next) {
- next = rcu_dereference_protected(p->dst.rt_next,
+ while (rth) {
+ next = rcu_dereference_protected(rth->dst.rt_next,
lockdep_is_held(rt_hash_lock_addr(i)));
- if (!rt_is_expired(p)) {
- prev = &p->dst.rt_next;
+
+ if (!net ||
+ net_eq(dev_net(rth->dst.dev), net)) {
+ rcu_assign_pointer(*pprev, next);
+ rcu_assign_pointer(rth->dst.rt_next, list);
+ list = rth;
} else {
- *prev = next;
- rt_free(p);
+ pprev = &rth->dst.rt_next;
}
+ rth = next;
}
- }
-#else
- rth = rcu_dereference_protected(rt_hash_table[i].chain,
- lockdep_is_held(rt_hash_lock_addr(i)));
- rcu_assign_pointer(rt_hash_table[i].chain, NULL);
- tail = NULL;
-#endif
+
spin_unlock_bh(rt_hash_lock_addr(i));
- for (; rth != tail; rth = next) {
- next = rcu_dereference_protected(rth->dst.rt_next, 1);
- rt_free(rth);
+ for (; list; list = next) {
+ next = rcu_dereference_protected(list->dst.rt_next, 1);
+ rt_free(list);
}
}
}
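The rewritten rt_do_flush() above follows a common pattern: entries to be destroyed are unlinked onto a private list while the hash lock is held, and the potentially slow freeing happens only after the unlock. A generic, self-contained sketch of the same idea; `struct node`, `struct bucket`, matches() and free_node() are hypothetical:

	struct node {
		struct node *next;
	};

	struct bucket {
		struct node *head;
		spinlock_t lock;
	};

	extern bool matches(struct node *n);
	extern void free_node(struct node *n);

	static void flush_bucket(struct bucket *b)
	{
		struct node *n, *next, *doomed = NULL;
		struct node **pprev = &b->head;

		spin_lock_bh(&b->lock);
		for (n = *pprev; n; n = next) {
			next = n->next;
			if (matches(n)) {
				*pprev = next;		/* unlink from bucket */
				n->next = doomed;	/* chain onto private list */
				doomed = n;
			} else {
				pprev = &n->next;
			}
		}
		spin_unlock_bh(&b->lock);

		for (n = doomed; n; n = next) {	/* free with lock dropped */
			next = n->next;
			free_node(n);
		}
	}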
@@ -917,13 +906,13 @@ void rt_cache_flush(struct net *net, int delay)
{
rt_cache_invalidate(net);
if (delay >= 0)
- rt_do_flush(!in_softirq());
+ rt_do_flush(net, !in_softirq());
}
/* Flush previous cache invalidated entries from the cache */
-void rt_cache_flush_batch(void)
+void rt_cache_flush_batch(struct net *net)
{
- rt_do_flush(!in_softirq());
+ rt_do_flush(net, !in_softirq());
}
static void rt_emergency_hash_rebuild(struct net *net)
@@ -1124,7 +1113,7 @@ restart:
*/
rt->dst.flags |= DST_NOCACHE;
- if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+ if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
int err = arp_bind_neighbour(&rt->dst);
if (err) {
if (net_ratelimit())
@@ -1222,7 +1211,7 @@ restart:
/* Try to bind route to arp only if it is output
route or unicast forwarding path.
*/
- if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+ if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
int err = arp_bind_neighbour(&rt->dst);
if (err) {
spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1287,7 +1276,7 @@ void rt_bind_peer(struct rtable *rt, int create)
{
struct inet_peer *peer;
- peer = inet_getpeer(rt->rt_dst, create);
+ peer = inet_getpeer_v4(rt->rt_dst, create);
if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
inet_putpeer(peer);
@@ -1404,7 +1393,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
if (rth->fl.fl4_dst != daddr ||
rth->fl.fl4_src != skeys[i] ||
rth->fl.oif != ikeys[k] ||
- rth->fl.iif != 0 ||
+ rt_is_input_route(rth) ||
rt_is_expired(rth) ||
!net_eq(dev_net(rth->dst.dev), net)) {
rthp = &rth->dst.rt_next;
@@ -1433,8 +1422,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rt->dst.child = NULL;
if (rt->dst.dev)
dev_hold(rt->dst.dev);
- if (rt->idev)
- in_dev_hold(rt->idev);
rt->dst.obsolete = -1;
rt->dst.lastuse = jiffies;
rt->dst.path = &rt->dst;
@@ -1666,7 +1653,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
rth->rt_dst != daddr ||
rth->rt_src != iph->saddr ||
rth->fl.oif != ikeys[k] ||
- rth->fl.iif != 0 ||
+ rt_is_input_route(rth) ||
dst_metric_locked(&rth->dst, RTAX_MTU) ||
!net_eq(dev_net(rth->dst.dev), net) ||
rt_is_expired(rth))
@@ -1686,11 +1673,14 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
if (mtu < dst_mtu(&rth->dst)) {
dst_confirm(&rth->dst);
if (mtu < ip_rt_min_pmtu) {
+ u32 lock = dst_metric(&rth->dst,
+ RTAX_LOCK);
mtu = ip_rt_min_pmtu;
- rth->dst.metrics[RTAX_LOCK-1] |=
- (1 << RTAX_MTU);
+ lock |= (1 << RTAX_MTU);
+ dst_metric_set(&rth->dst, RTAX_LOCK,
+ lock);
}
- rth->dst.metrics[RTAX_MTU-1] = mtu;
+ dst_metric_set(&rth->dst, RTAX_MTU, mtu);
dst_set_expires(&rth->dst,
ip_rt_mtu_expires);
}
@@ -1708,10 +1698,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
if (dst_mtu(dst) > mtu && mtu >= 68 &&
!(dst_metric_locked(dst, RTAX_MTU))) {
if (mtu < ip_rt_min_pmtu) {
+ u32 lock = dst_metric(dst, RTAX_LOCK);
mtu = ip_rt_min_pmtu;
- dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
+ dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
}
- dst->metrics[RTAX_MTU-1] = mtu;
+ dst_metric_set(dst, RTAX_MTU, mtu);
dst_set_expires(dst, ip_rt_mtu_expires);
call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
}
@@ -1728,33 +1719,13 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *) dst;
struct inet_peer *peer = rt->peer;
- struct in_device *idev = rt->idev;
if (peer) {
rt->peer = NULL;
inet_putpeer(peer);
}
-
- if (idev) {
- rt->idev = NULL;
- in_dev_put(idev);
- }
}
-static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
- int how)
-{
- struct rtable *rt = (struct rtable *) dst;
- struct in_device *idev = rt->idev;
- if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
- struct in_device *loopback_idev =
- in_dev_get(dev_net(dev)->loopback_dev);
- if (loopback_idev) {
- rt->idev = loopback_idev;
- in_dev_put(idev);
- }
- }
-}
static void ipv4_link_failure(struct sk_buff *skb)
{
@@ -1790,7 +1761,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
__be32 src;
struct fib_result res;
- if (rt->fl.iif == 0)
+ if (rt_is_output_route(rt))
src = rt->rt_src;
else {
rcu_read_lock();
@@ -1814,38 +1785,55 @@ static void set_class_tag(struct rtable *rt, u32 tag)
}
#endif
+static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
+{
+ unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
+
+ if (advmss == 0) {
+ advmss = max_t(unsigned int, dst->dev->mtu - 40,
+ ip_rt_min_advmss);
+ if (advmss > 65535 - 40)
+ advmss = 65535 - 40;
+ }
+ return advmss;
+}
+
+static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+{
+ unsigned int mtu = dst->dev->mtu;
+
+ if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+ const struct rtable *rt = (const struct rtable *) dst;
+
+ if (rt->rt_gateway != rt->rt_dst && mtu > 576)
+ mtu = 576;
+ }
+
+ if (mtu > IP_MAX_MTU)
+ mtu = IP_MAX_MTU;
+
+ return mtu;
+}
+
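With .default_advmss and .default_mtu wired into ipv4_dst_ops, these values no longer need to be pre-seeded into every route's metrics. The generic readers presumably fall back to the ops when the raw metric is zero, along these lines (a sketch, not the verbatim dst.h code):

	/* sketch: on-demand metric defaults instead of pre-seeded values */
	static inline u32 dst_mtu(const struct dst_entry *dst)
	{
		u32 mtu = dst_metric_raw(dst, RTAX_MTU);

		if (!mtu)
			mtu = dst->ops->default_mtu(dst);
		return mtu;
	}

	static inline u32 dst_metric_advmss(const struct dst_entry *dst)
	{
		u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

		if (!advmss)
			advmss = dst->ops->default_advmss(dst);
		return advmss;
	}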
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
+ struct dst_entry *dst = &rt->dst;
struct fib_info *fi = res->fi;
if (fi) {
if (FIB_RES_GW(*res) &&
FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = FIB_RES_GW(*res);
- memcpy(rt->dst.metrics, fi->fib_metrics,
- sizeof(rt->dst.metrics));
- if (fi->fib_mtu == 0) {
- rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
- if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
- rt->rt_gateway != rt->rt_dst &&
- rt->dst.dev->mtu > 576)
- rt->dst.metrics[RTAX_MTU-1] = 576;
- }
+ dst_import_metrics(dst, fi->fib_metrics);
#ifdef CONFIG_NET_CLS_ROUTE
- rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+ dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
- } else
- rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
-
- if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
- rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
- if (dst_mtu(&rt->dst) > IP_MAX_MTU)
- rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
- if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
- rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
- ip_rt_min_advmss);
- if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
- rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+ }
+
+ if (dst_mtu(dst) > IP_MAX_MTU)
+ dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
+ if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
+ dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1910,7 +1898,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->fl.iif = dev->ifindex;
rth->dst.dev = init_net.loopback_dev;
dev_hold(rth->dst.dev);
- rth->idev = in_dev_get(rth->dst.dev);
rth->fl.oif = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
@@ -2050,7 +2037,6 @@ static int __mkroute_input(struct sk_buff *skb,
rth->fl.iif = in_dev->dev->ifindex;
rth->dst.dev = (out_dev)->dev;
dev_hold(rth->dst.dev);
- rth->idev = in_dev_get(rth->dst.dev);
rth->fl.oif = 0;
rth->rt_spec_dst= spec_dst;
@@ -2111,12 +2097,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
{
struct fib_result res;
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = daddr,
- .saddr = saddr,
- .tos = tos,
- .scope = RT_SCOPE_UNIVERSE,
- } },
+ struct flowi fl = { .fl4_dst = daddr,
+ .fl4_src = saddr,
+ .fl4_tos = tos,
+ .fl4_scope = RT_SCOPE_UNIVERSE,
.mark = skb->mark,
.iif = dev->ifindex };
unsigned flags = 0;
@@ -2231,7 +2215,6 @@ local_input:
rth->fl.iif = dev->ifindex;
rth->dst.dev = net->loopback_dev;
dev_hold(rth->dst.dev);
- rth->idev = in_dev_get(rth->dst.dev);
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
rth->dst.input= ip_local_deliver;
@@ -2417,9 +2400,6 @@ static int __mkroute_output(struct rtable **result,
if (!rth)
return -ENOBUFS;
- in_dev_hold(in_dev);
- rth->idev = in_dev;
-
atomic_set(&rth->dst.__refcnt, 1);
rth->dst.flags= DST_HOST;
if (IN_DEV_CONF_GET(in_dev, NOXFRM))
@@ -2506,14 +2486,11 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
const struct flowi *oldflp)
{
u32 tos = RT_FL_TOS(oldflp);
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = oldflp->fl4_dst,
- .saddr = oldflp->fl4_src,
- .tos = tos & IPTOS_RT_MASK,
- .scope = ((tos & RTO_ONLINK) ?
- RT_SCOPE_LINK :
- RT_SCOPE_UNIVERSE),
- } },
+ struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
+ .fl4_src = oldflp->fl4_src,
+ .fl4_tos = tos & IPTOS_RT_MASK,
+ .fl4_scope = ((tos & RTO_ONLINK) ?
+ RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
.mark = oldflp->mark,
.iif = net->loopback_dev->ifindex,
.oif = oldflp->oif };
@@ -2700,7 +2677,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
rth = rcu_dereference_bh(rth->dst.rt_next)) {
if (rth->fl.fl4_dst == flp->fl4_dst &&
rth->fl.fl4_src == flp->fl4_src &&
- rth->fl.iif == 0 &&
+ rt_is_output_route(rth) &&
rth->fl.oif == flp->oif &&
rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
@@ -2756,7 +2733,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
- memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+ dst_copy_metrics(new, &ort->dst);
new->dev = ort->dst.dev;
if (new->dev)
@@ -2764,9 +2741,6 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
rt->fl = ort->fl;
- rt->idev = ort->idev;
- if (rt->idev)
- in_dev_hold(rt->idev);
rt->rt_genid = rt_genid(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
@@ -2858,7 +2832,7 @@ static int rt_fill_info(struct net *net,
if (rt->dst.tclassid)
NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
- if (rt->fl.iif)
+ if (rt_is_input_route(rt))
NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
else if (rt->rt_src != rt->fl.fl4_src)
NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
@@ -2866,7 +2840,7 @@ static int rt_fill_info(struct net *net,
if (rt->rt_dst != rt->rt_gateway)
NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
- if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+ if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
goto nla_put_failure;
if (rt->fl.mark)
@@ -2883,7 +2857,7 @@ static int rt_fill_info(struct net *net,
}
}
- if (rt->fl.iif) {
+ if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
__be32 dst = rt->rt_dst;
@@ -2978,13 +2952,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
err = -rt->dst.error;
} else {
struct flowi fl = {
- .nl_u = {
- .ip4_u = {
- .daddr = dst,
- .saddr = src,
- .tos = rtm->rtm_tos,
- },
- },
+ .fl4_dst = dst,
+ .fl4_src = src,
+ .fl4_tos = rtm->rtm_tos,
.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
.mark = mark,
};
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 650cace2180..47519205a01 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -346,17 +346,14 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
*/
{
struct flowi fl = { .mark = sk->sk_mark,
- .nl_u = { .ip4_u =
- { .daddr = ((opt && opt->srr) ?
- opt->faddr :
- ireq->rmt_addr),
- .saddr = ireq->loc_addr,
- .tos = RT_CONN_FLAGS(sk) } },
+ .fl4_dst = ((opt && opt->srr) ?
+ opt->faddr : ireq->rmt_addr),
+ .fl4_src = ireq->loc_addr,
+ .fl4_tos = RT_CONN_FLAGS(sk),
.proto = IPPROTO_TCP,
.flags = inet_sk_flowi_flags(sk),
- .uli_u = { .ports =
- { .sport = th->dest,
- .dport = th->source } } };
+ .fl_ip_sport = th->dest,
+ .fl_ip_dport = th->source };
security_req_classify_flow(req, &fl);
if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
reqsk_free(req);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1b4ec21497a..1a456652086 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -28,6 +28,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
+static int ip_ttl_min = 1;
+static int ip_ttl_max = 255;
/* Update system visible IP port range */
static void set_local_port_range(int range[2])
@@ -155,8 +157,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_ip_default_ttl,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = ipv4_doint_and_flush,
- .extra2 = &init_net,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &ip_ttl_min,
+ .extra2 = &ip_ttl_max,
},
{
.procname = "ip_no_pmtu_disc",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f15c36a706e..6c11eece262 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1193,7 +1193,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
- KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+ "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
#endif
@@ -1477,10 +1477,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
* shouldn't happen.
*/
if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
- KERN_INFO "recvmsg bug: copied %X "
- "seq %X rcvnxt %X fl %X\n", *seq,
- TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
- flags))
+ "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+ *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+ flags))
break;
offset = *seq - TCP_SKB_CB(skb)->seq;
@@ -1490,10 +1489,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto found_ok_skb;
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
- WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
- "copied %X seq %X rcvnxt %X fl %X\n",
- *seq, TCP_SKB_CB(skb)->seq,
- tp->rcv_nxt, flags);
+ WARN(!(flags & MSG_PEEK),
+ "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+ *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
}
/* Well, if we have backlog, try to process it now yet. */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6d8ab1c4efc..2549b29b062 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -734,7 +734,7 @@ void tcp_update_metrics(struct sock *sk)
* Reset our results.
*/
if (!(dst_metric_locked(dst, RTAX_RTT)))
- dst->metrics[RTAX_RTT - 1] = 0;
+ dst_metric_set(dst, RTAX_RTT, 0);
return;
}
@@ -776,34 +776,38 @@ void tcp_update_metrics(struct sock *sk)
if (dst_metric(dst, RTAX_SSTHRESH) &&
!dst_metric_locked(dst, RTAX_SSTHRESH) &&
(tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
- dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
+ dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
if (!dst_metric_locked(dst, RTAX_CWND) &&
tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
- dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
+ dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
} else if (tp->snd_cwnd > tp->snd_ssthresh &&
icsk->icsk_ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
if (!dst_metric_locked(dst, RTAX_SSTHRESH))
- dst->metrics[RTAX_SSTHRESH-1] =
- max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
+ dst_metric_set(dst, RTAX_SSTHRESH,
+ max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
if (!dst_metric_locked(dst, RTAX_CWND))
- dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
+ dst_metric_set(dst, RTAX_CWND,
+ (dst_metric(dst, RTAX_CWND) +
+ tp->snd_cwnd) >> 1);
} else {
/* Else slow start did not finish, cwnd is non-sense,
ssthresh may be also invalid.
*/
if (!dst_metric_locked(dst, RTAX_CWND))
- dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
+ dst_metric_set(dst, RTAX_CWND,
+ (dst_metric(dst, RTAX_CWND) +
+ tp->snd_ssthresh) >> 1);
if (dst_metric(dst, RTAX_SSTHRESH) &&
!dst_metric_locked(dst, RTAX_SSTHRESH) &&
tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
- dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
+ dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
}
if (!dst_metric_locked(dst, RTAX_REORDERING)) {
if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
tp->reordering != sysctl_tcp_reordering)
- dst->metrics[RTAX_REORDERING-1] = tp->reordering;
+ dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
}
}
}
@@ -912,25 +916,20 @@ static void tcp_init_metrics(struct sock *sk)
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
}
tcp_set_rto(sk);
- if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
- goto reset;
-
-cwnd:
- tp->snd_cwnd = tcp_init_cwnd(tp, dst);
- tp->snd_cwnd_stamp = tcp_time_stamp;
- return;
-
+ if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) {
reset:
- /* Play conservative. If timestamps are not
- * supported, TCP will fail to recalculate correct
- * rtt, if initial rto is too small. FORGET ALL AND RESET!
- */
- if (!tp->rx_opt.saw_tstamp && tp->srtt) {
- tp->srtt = 0;
- tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
- inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+ /* Play conservative. If timestamps are not
+ * supported, TCP will fail to recalculate correct
+ * rtt, if initial rto is too small. FORGET ALL AND RESET!
+ */
+ if (!tp->rx_opt.saw_tstamp && tp->srtt) {
+ tp->srtt = 0;
+ tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
+ inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+ }
}
- goto cwnd;
+ tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+ tp->snd_cwnd_stamp = tcp_time_stamp;
}
static void tcp_update_reordering(struct sock *sk, const int metric,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d978bb2f748..856f68466d4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1210,12 +1210,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
};
#endif
-static struct timewait_sock_ops tcp_timewait_sock_ops = {
- .twsk_obj_size = sizeof(struct tcp_timewait_sock),
- .twsk_unique = tcp_twsk_unique,
- .twsk_destructor= tcp_twsk_destructor,
-};
-
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
@@ -1347,7 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_death_row.sysctl_tw_recycle &&
(dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
- peer->v4daddr == saddr) {
+ peer->daddr.a4 == saddr) {
inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
@@ -1442,7 +1436,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
tcp_mtup_init(newsk);
tcp_sync_mss(newsk, dst_mtu(dst));
- newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
+ newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
@@ -1763,64 +1757,40 @@ do_time_wait:
goto discard_it;
}
-/* VJ's idea. Save last timestamp seen from this destination
- * and hold it at least for normal timewait interval to use for duplicate
- * segment detection in subsequent connections, before they enter synchronized
- * state.
- */
-
-int tcp_v4_remember_stamp(struct sock *sk)
+struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
+ struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
struct inet_sock *inet = inet_sk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
- struct inet_peer *peer = NULL;
- int release_it = 0;
+ struct inet_peer *peer;
if (!rt || rt->rt_dst != inet->inet_daddr) {
- peer = inet_getpeer(inet->inet_daddr, 1);
- release_it = 1;
+ peer = inet_getpeer_v4(inet->inet_daddr, 1);
+ *release_it = true;
} else {
if (!rt->peer)
rt_bind_peer(rt, 1);
peer = rt->peer;
+ *release_it = false;
}
- if (peer) {
- if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
- ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
- peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
- peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
- peer->tcp_ts = tp->rx_opt.ts_recent;
- }
- if (release_it)
- inet_putpeer(peer);
- return 1;
- }
-
- return 0;
+ return peer;
}
-EXPORT_SYMBOL(tcp_v4_remember_stamp);
+EXPORT_SYMBOL(tcp_v4_get_peer);
-int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
+void *tcp_v4_tw_get_peer(struct sock *sk)
{
- struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
-
- if (peer) {
- const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-
- if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
- ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
- peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
- peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
- peer->tcp_ts = tcptw->tw_ts_recent;
- }
- inet_putpeer(peer);
- return 1;
- }
+ struct inet_timewait_sock *tw = inet_twsk(sk);
- return 0;
+ return inet_getpeer_v4(tw->tw_daddr, 1);
}
+EXPORT_SYMBOL(tcp_v4_tw_get_peer);
+
+static struct timewait_sock_ops tcp_timewait_sock_ops = {
+ .twsk_obj_size = sizeof(struct tcp_timewait_sock),
+ .twsk_unique = tcp_twsk_unique,
+ .twsk_destructor= tcp_twsk_destructor,
+ .twsk_getpeer = tcp_v4_tw_get_peer,
+};
const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
@@ -1828,7 +1798,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
.rebuild_header = inet_sk_rebuild_header,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
- .remember_stamp = tcp_v4_remember_stamp,
+ .get_peer = tcp_v4_get_peer,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a66735f7596..80b1f80759a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,6 +49,56 @@ struct inet_timewait_death_row tcp_death_row = {
};
EXPORT_SYMBOL_GPL(tcp_death_row);
+/* VJ's idea. Save last timestamp seen from this destination
+ * and hold it at least for normal timewait interval to use for duplicate
+ * segment detection in subsequent connections, before they enter synchronized
+ * state.
+ */
+
+static int tcp_remember_stamp(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_peer *peer;
+ bool release_it;
+
+ peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
+ if (peer) {
+ if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
+ ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+ peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+ peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
+ peer->tcp_ts = tp->rx_opt.ts_recent;
+ }
+ if (release_it)
+ inet_putpeer(peer);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+{
+ struct sock *sk = (struct sock *) tw;
+ struct inet_peer *peer;
+
+ peer = twsk_getpeer(sk);
+ if (peer) {
+ const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+
+ if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
+ ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+ peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+ peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
+ peer->tcp_ts = tcptw->tw_ts_recent;
+ }
+ inet_putpeer(peer);
+ return 1;
+ }
+ return 0;
+}
+
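tcp_tw_remember_stamp() reaches the new tcp_v4_tw_get_peer() through the .twsk_getpeer hook registered in tcp_timewait_sock_ops (see tcp_ipv4.c above); the dispatcher is presumably a one-liner of the form:

	/* sketch: dispatch through the per-family timewait ops, so this
	 * file no longer needs the AF_INET-only special case it deletes
	 */
	static inline struct inet_peer *twsk_getpeer(struct sock *sk)
	{
		return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
	}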
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
if (seq == s_win)
@@ -149,14 +199,9 @@ kill_with_rst:
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
}
- /* I am shamed, but failed to make it more elegant.
- * Yes, it is direct reference to IP, which is impossible
- * to generalize to IPv6. Taking into account that IPv6
- * do not understand recycling in any case, it not
- * a big problem in practice. --ANK */
- if (tw->tw_family == AF_INET &&
- tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
- tcp_v4_tw_remember_stamp(tw))
+ if (tcp_death_row.sysctl_tw_recycle &&
+ tcptw->tw_ts_recent_stamp &&
+ tcp_tw_remember_stamp(tw))
inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
TCP_TIMEWAIT_LEN);
else
@@ -274,7 +319,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
int recycle_ok = 0;
if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
- recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);
+ recycle_ok = tcp_remember_stamp(sk);
if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
tw = inet_twsk_alloc(sk, state);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 61c2463e275..dc7c096ddfe 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -55,7 +55,7 @@ int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
int sysctl_tcp_tso_win_divisor __read_mostly = 3;
int sysctl_tcp_mtu_probing __read_mostly = 0;
-int sysctl_tcp_base_mss __read_mostly = 512;
+int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
@@ -119,9 +119,13 @@ static __u16 tcp_advertise_mss(struct sock *sk)
struct dst_entry *dst = __sk_dst_get(sk);
int mss = tp->advmss;
- if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
- mss = dst_metric(dst, RTAX_ADVMSS);
- tp->advmss = mss;
+ if (dst) {
+ unsigned int metric = dst_metric_advmss(dst);
+
+ if (metric < mss) {
+ mss = metric;
+ tp->advmss = mss;
+ }
}
return (__u16)mss;
@@ -224,10 +228,15 @@ void tcp_select_initial_window(int __space, __u32 mss,
}
}
- /* Set initial window to value enough for senders, following RFC5681. */
+ /* Set initial window to a value enough for senders starting with
+ * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+ * a limit on the initial window when mss is larger than 1460.
+ */
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = rfc3390_bytes_to_packets(mss);
-
+ int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+ if (mss > 1460)
+ init_cwnd =
+ max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
@@ -824,8 +833,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0)
+ if (tcp_packets_in_flight(tp) == 0) {
tcp_ca_event(sk, CA_EVENT_TX_START);
+ skb->ooo_okay = 1;
+ } else
+ skb->ooo_okay = 0;
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
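skb->ooo_okay marks the one moment when the flow provably has nothing in flight, so the transmit path may migrate it to a different queue without risking reordering. A hypothetical consumer in queue selection might look roughly like this (pick_tx_queue() and the exact policy are assumptions, not the stack's actual code):

	/* sketch: hypothetical queue selection honouring skb->ooo_okay */
	static u16 pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;
		int q = sk ? sk_tx_queue_get(sk) : -1;

		if (q < 0 || skb->ooo_okay || q >= dev->real_num_tx_queues) {
			q = skb_tx_hash(dev, skb);	/* safe to re-hash now */
			if (sk)
				sk_tx_queue_set(sk, q);	/* remember new queue */
		}
		return q;
	}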
@@ -2419,7 +2431,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
skb_dst_set(skb, dst_clone(dst));
- mss = dst_metric(dst, RTAX_ADVMSS);
+ mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
mss = tp->rx_opt.user_mss;
@@ -2553,7 +2565,7 @@ static void tcp_connect_init(struct sock *sk)
if (!tp->window_clamp)
tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
- tp->advmss = dst_metric(dst, RTAX_ADVMSS);
+ tp->advmss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
tp->advmss = tp->rx_opt.user_mss;
@@ -2596,6 +2608,7 @@ int tcp_connect(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
+ int err;
tcp_connect_init(sk);
@@ -2618,7 +2631,9 @@ int tcp_connect(struct sock *sk)
sk->sk_wmem_queued += buff->truesize;
sk_mem_charge(sk, buff->truesize);
tp->packets_out += tcp_skb_pcount(buff);
- tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+ err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+ if (err == -ECONNREFUSED)
+ return err;
/* We change tp->snd_nxt after the tcp_transmit_skb() call
* in order to make this packet get counted in tcpOutSegs.
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 6211e211417..85ee7eb7e38 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -154,7 +154,7 @@ static int tcpprobe_sprint(char *tbuf, int n)
struct timespec tv
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
- return snprintf(tbuf, n,
+ return scnprintf(tbuf, n,
"%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
(unsigned long) tv.tv_sec,
(unsigned long) tv.tv_nsec,
@@ -174,7 +174,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
return -EINVAL;
while (cnt < len) {
- char tbuf[128];
+ char tbuf[164];
int width;
/* Wait for data in buffer */
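The snprintf() to scnprintf() switch matters because snprintf() returns the length the output would have taken, which can exceed the buffer and corrupt the copy arithmetic in tcpprobe_read(); scnprintf() returns what was actually stored. A minimal illustration:

	char tbuf[8];

	/* returns 10 (the would-be length), though only 7 chars + NUL fit */
	int a = snprintf(tbuf, sizeof(tbuf), "0123456789");

	/* returns 7, the number of characters actually written */
	int b = scnprintf(tbuf, sizeof(tbuf), "0123456789");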
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 2d3ded4d078..8157b17959e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -430,7 +430,7 @@ begin:
if (result) {
exact_match:
- if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+ if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
@@ -500,7 +500,7 @@ begin:
goto begin;
if (result) {
- if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+ if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, saddr, hnum, sport,
daddr, dport, dif) < badness)) {
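atomic_inc_not_zero_hint() behaves like atomic_inc_not_zero() but seeds the cmpxchg loop with the expected current value (2 here: one reference held by the hash table, one by the owner), typically saving the initial read. A sketch consistent with that description (the real helper may add fast paths):

	/* sketch: increment v unless it is zero, starting the cmpxchg
	 * loop at 'hint' instead of reading the current value first
	 */
	static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
	{
		int val, c = hint;

		do {
			val = atomic_cmpxchg(v, c, c + 1);
			if (val == c)
				return 1;	/* went from c to c + 1 */
			c = val;		/* retry with the value seen */
		} while (c);

		return 0;	/* refcount hit zero; caller must not use it */
	}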
@@ -890,15 +890,13 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (rt == NULL) {
struct flowi fl = { .oif = ipc.oif,
.mark = sk->sk_mark,
- .nl_u = { .ip4_u =
- { .daddr = faddr,
- .saddr = saddr,
- .tos = tos } },
+ .fl4_dst = faddr,
+ .fl4_src = saddr,
+ .fl4_tos = tos,
.proto = sk->sk_protocol,
.flags = inet_sk_flowi_flags(sk),
- .uli_u = { .ports =
- { .sport = inet->inet_sport,
- .dport = dport } } };
+ .fl_ip_sport = inet->inet_sport,
+ .fl_ip_dport = dport };
struct net *net = sock_net(sk);
security_sk_classify_flow(sk, &fl);
@@ -2229,7 +2227,7 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
- offset = skb->csum_start - skb_headroom(skb);
+ offset = skb_checksum_start_offset(skb);
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
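The helper introduced here is visible from the line it replaces: skb->csum_start is an offset from skb->head, so subtracting the headroom converts it into an offset from skb->data, which is what skb_checksum() expects.

	/* equivalent of the open-coded expression this hunk removes */
	static inline int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		return skb->csum_start - skb_headroom(skb);
	}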
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 6f368413eb0..534972e114a 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -56,7 +56,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
ip_select_ident(top_iph, dst->child, NULL);
- top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT);
+ top_iph->ttl = ip4_dst_hoplimit(dst->child);
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 4464f3bff6a..b057d40adde 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
+#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
@@ -22,12 +23,8 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
xfrm_address_t *daddr)
{
struct flowi fl = {
- .nl_u = {
- .ip4_u = {
- .tos = tos,
- .daddr = daddr->a4,
- },
- },
+ .fl4_dst = daddr->a4,
+ .fl4_tos = tos,
};
struct dst_entry *dst;
struct rtable *rt;
@@ -80,10 +77,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.dst.dev = dev;
dev_hold(dev);
- xdst->u.rt.idev = in_dev_get(dev);
- if (!xdst->u.rt.idev)
- return -ENODEV;
-
xdst->u.rt.peer = rt->peer;
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
@@ -158,6 +151,20 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
}
break;
+
+ case IPPROTO_GRE:
+ if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
+ __be16 *greflags = (__be16 *)xprth;
+ __be32 *gre_hdr = (__be32 *)xprth;
+
+ if (greflags[0] & GRE_KEY) {
+ if (greflags[0] & GRE_CSUM)
+ gre_hdr++;
+ fl->fl_gre_key = gre_hdr[1];
+ }
+ }
+ break;
+
default:
fl->fl_ipsec_spi = 0;
break;
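The pointer arithmetic above follows the GRE header layout (RFC 2890): 2 bytes of flags, 2 bytes of protocol, then optional 32-bit words in a fixed order, with the checksum word preceding the key word. That is why gre_hdr is advanced by one word when GRE_CSUM is set, and why 12 bytes (base header plus two optional words) is the worst case pulled before reading the key. As a sketch:

	/* GRE header per RFC 2890; optional words appear in this order */
	struct gre_hdr_sketch {
		__be16 flags;		/* GRE_CSUM / GRE_KEY / GRE_SEQ bits */
		__be16 protocol;	/* EtherType of the payload */
		/* if GRE_CSUM: __be32 csum_and_reserved;  (one word)       */
		/* if GRE_KEY:  __be32 key;   what _decode_session4 copies
		 *                            into fl_gre_key               */
		/* if GRE_SEQ:  __be32 seq;                                 */
	};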
@@ -189,8 +196,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (likely(xdst->u.rt.idev))
- in_dev_put(xdst->u.rt.idev);
if (likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
xfrm_dst_destroy(xdst);
@@ -199,27 +204,9 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int unregister)
{
- struct xfrm_dst *xdst;
-
if (!unregister)
return;
- xdst = (struct xfrm_dst *)dst;
- if (xdst->u.rt.idev->dev == dev) {
- struct in_device *loopback_idev =
- in_dev_get(dev_net(dev)->loopback_dev);
- BUG_ON(!loopback_idev);
-
- do {
- in_dev_put(xdst->u.rt.idev);
- xdst->u.rt.idev = loopback_idev;
- in_dev_hold(loopback_idev);
- xdst = (struct xfrm_dst *)xdst->u.dst.child;
- } while (xdst->u.dst.xfrm);
-
- __in_dev_put(loopback_idev);
- }
-
xfrm_dst_ifdown(dst, dev);
}