Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c	349
1 file changed, 74 insertions, 275 deletions
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 72e88c20802..95bfa1ba5b2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -158,34 +158,8 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
- struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer;
- u32 *p = NULL;
-
- peer = rt_get_peer_create(rt, rt->rt_dst);
- if (peer) {
- u32 *old_p = __DST_METRICS_PTR(old);
- unsigned long prev, new;
-
- p = peer->metrics;
- if (inet_metrics_new(peer))
- memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
-
- new = (unsigned long) p;
- prev = cmpxchg(&dst->_metrics, old, new);
-
- if (prev != old) {
- p = __DST_METRICS_PTR(prev);
- if (prev & DST_METRICS_READ_ONLY)
- p = NULL;
- } else {
- if (rt->fi) {
- fib_info_put(rt->fi);
- rt->fi = NULL;
- }
- }
- }
- return p;
+ WARN_ON(1);
+ return NULL;
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -423,18 +397,16 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
int len;
seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
- "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
- r->dst.dev ? r->dst.dev->name : "*",
- (__force u32)r->rt_dst,
- (__force u32)r->rt_gateway,
- r->rt_flags, atomic_read(&r->dst.__refcnt),
- r->dst.__use, 0, (__force u32)r->rt_src,
- dst_metric_advmss(&r->dst) + 40,
- dst_metric(&r->dst, RTAX_WINDOW),
- (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
- dst_metric(&r->dst, RTAX_RTTVAR)),
- r->rt_key_tos,
- -1, 0, 0, &len);
+ "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
+ r->dst.dev ? r->dst.dev->name : "*",
+ (__force u32)r->rt_dst,
+ (__force u32)r->rt_gateway,
+ r->rt_flags, atomic_read(&r->dst.__refcnt),
+ r->dst.__use, 0, (__force u32)r->rt_src,
+ dst_metric_advmss(&r->dst) + 40,
+ dst_metric(&r->dst, RTAX_WINDOW), 0,
+ r->rt_key_tos,
+ -1, 0, 0, &len);
seq_printf(seq, "%*s\n", 127 - len, "");
}
@@ -671,7 +643,7 @@ static inline int rt_fast_clean(struct rtable *rth)
static inline int rt_valuable(struct rtable *rth)
{
return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
- (rt_has_peer(rth) && rt_peer_ptr(rth)->pmtu_expires);
+ rth->dst.expires;
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -917,7 +889,6 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
- inetpeer_invalidate_family(AF_INET);
}
/*
@@ -1244,31 +1215,6 @@ skip_hashing:
return rt;
}
-static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
-
-static u32 rt_peer_genid(void)
-{
- return atomic_read(&__rt_peer_genid);
-}
-
-void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
-{
- struct inet_peer_base *base;
- struct inet_peer *peer;
-
- base = inetpeer_base_ptr(rt->_peer);
- if (!base)
- return;
-
- peer = inet_getpeer_v4(base, daddr, create);
- if (peer) {
- if (!rt_set_peer(rt, peer))
- inet_putpeer(peer);
- else
- rt->rt_peer_genid = rt_peer_genid();
- }
-}
-
/*
* Peer allocation may fail only in serious out-of-memory conditions. However
* we still can generate some output.
@@ -1291,20 +1237,15 @@ static void ip_select_fb_ident(struct iphdr *iph)
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
- struct rtable *rt = (struct rtable *) dst;
-
- if (rt && !(rt->dst.flags & DST_NOPEER)) {
- struct inet_peer *peer = rt_get_peer_create(rt, rt->rt_dst);
+ struct net *net = dev_net(dst->dev);
+ struct inet_peer *peer;
- /* If peer is attached to destination, it is never detached,
- so that we need not to grab a lock to dereference it.
- */
- if (peer) {
- iph->id = htons(inet_getid(peer, more));
- return;
- }
- } else if (!rt)
- pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
+ peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
+ if (peer) {
+ iph->id = htons(inet_getid(peer, more));
+ inet_putpeer(peer);
+ return;
+ }
ip_select_fb_ident(iph);
}
@@ -1330,30 +1271,6 @@ static void rt_del(unsigned int hash, struct rtable *rt)
spin_unlock_bh(rt_hash_lock_addr(hash));
}
-static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
-{
- struct rtable *rt = (struct rtable *) dst;
- __be32 orig_gw = rt->rt_gateway;
- struct neighbour *n;
-
- dst_confirm(&rt->dst);
-
- rt->rt_gateway = peer->redirect_learned.a4;
-
- n = ipv4_neigh_lookup(&rt->dst, NULL, &rt->rt_gateway);
- if (!n) {
- rt->rt_gateway = orig_gw;
- return;
- }
- if (!(n->nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
- } else {
- rt->rt_flags |= RTCF_REDIRECTED;
- call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
- }
- neigh_release(n);
-}
-
/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
__be32 saddr, struct net_device *dev)
@@ -1362,7 +1279,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
struct in_device *in_dev = __in_dev_get_rcu(dev);
__be32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
- struct inet_peer *peer;
struct net *net;
if (!in_dev)
@@ -1395,6 +1311,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rthp = &rt_hash_table[hash].chain;
while ((rt = rcu_dereference(*rthp)) != NULL) {
+ struct neighbour *n;
+
rthp = &rt->dst.rt_next;
if (rt->rt_key_dst != daddr ||
@@ -1408,13 +1326,16 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rt->rt_gateway != old_gw)
continue;
- peer = rt_get_peer_create(rt, rt->rt_dst);
- if (peer) {
- if (peer->redirect_learned.a4 != new_gw) {
- peer->redirect_learned.a4 = new_gw;
- atomic_inc(&__rt_peer_genid);
+ n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+ if (n) {
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ } else {
+ rt->rt_gateway = new_gw;
+ rt->rt_flags |= RTCF_REDIRECTED;
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
- check_peer_redir(&rt->dst, peer);
+ neigh_release(n);
}
}
}
@@ -1432,23 +1353,6 @@ reject_redirect:
;
}
-static bool peer_pmtu_expired(struct inet_peer *peer)
-{
- unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
-
- return orig &&
- time_after_eq(jiffies, orig) &&
- cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
-}
-
-static bool peer_pmtu_cleaned(struct inet_peer *peer)
-{
- unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
-
- return orig &&
- cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
-}
-
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *)dst;
@@ -1458,16 +1362,13 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
if (dst->obsolete > 0) {
ip_rt_put(rt);
ret = NULL;
- } else if (rt->rt_flags & RTCF_REDIRECTED) {
+ } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+ rt->dst.expires) {
unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
rt->rt_oif,
rt_genid(dev_net(dst->dev)));
rt_del(hash, rt);
ret = NULL;
- } else if (rt_has_peer(rt)) {
- struct inet_peer *peer = rt_peer_ptr(rt);
- if (peer_pmtu_expired(peer))
- dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
}
}
return ret;
@@ -1494,6 +1395,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
struct inet_peer *peer;
+ struct net *net;
int log_martians;
rcu_read_lock();
@@ -1505,7 +1407,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
rcu_read_unlock();
- peer = rt_get_peer_create(rt, rt->rt_dst);
+ net = dev_net(rt->dst.dev);
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
if (!peer) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
return;
@@ -1522,7 +1425,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
*/
if (peer->rate_tokens >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
- return;
+ goto out_put_peer;
}
/* Check for load limit; set rate_last to the latest sent
@@ -1543,6 +1446,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
&rt->rt_dst, &rt->rt_gateway);
#endif
}
+out_put_peer:
+ inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
@@ -1585,7 +1490,7 @@ static int ip_error(struct sk_buff *skb)
break;
}
- peer = rt_get_peer_create(rt, rt->rt_dst);
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
send = true;
if (peer) {
@@ -1598,6 +1503,7 @@ static int ip_error(struct sk_buff *skb)
peer->rate_tokens -= ip_rt_error_cost;
else
send = false;
+ inet_putpeer(peer);
}
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
@@ -1606,50 +1512,17 @@ out: kfree_skb(skb);
return 0;
}
-static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
-{
- unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
-
- if (!expires)
- return;
- if (time_before(jiffies, expires)) {
- u32 orig_dst_mtu = dst_mtu(dst);
- if (peer->pmtu_learned < orig_dst_mtu) {
- if (!peer->pmtu_orig)
- peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
- dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
- }
- } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
- dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
-}
-
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer;
dst_confirm(dst);
- peer = rt_get_peer_create(rt, rt->rt_dst);
- if (peer) {
- unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
-
- if (mtu < ip_rt_min_pmtu)
- mtu = ip_rt_min_pmtu;
- if (!pmtu_expires || mtu < peer->pmtu_learned) {
-
- pmtu_expires = jiffies + ip_rt_mtu_expires;
- if (!pmtu_expires)
- pmtu_expires = 1UL;
-
- peer->pmtu_learned = mtu;
- peer->pmtu_expires = pmtu_expires;
+ if (mtu < ip_rt_min_pmtu)
+ mtu = ip_rt_min_pmtu;
- atomic_inc(&__rt_peer_genid);
- rt->rt_peer_genid = rt_peer_genid();
- }
- check_peer_pmtu(dst, peer);
- }
+ rt->rt_pmtu = mtu;
+ dst_set_expires(&rt->dst, ip_rt_mtu_expires);
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
@@ -1660,7 +1533,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
struct rtable *rt;
flowi4_init_output(&fl4, oif, mark, RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
- protocol, flow_flags | FLOWI_FLAG_PRECOW_METRICS,
+ protocol, flow_flags,
iph->daddr, iph->saddr, 0, 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
@@ -1681,30 +1554,12 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
-static void ipv4_validate_peer(struct rtable *rt)
-{
- if (rt->rt_peer_genid != rt_peer_genid()) {
- struct inet_peer *peer = rt_get_peer(rt, rt->rt_dst);
-
- if (peer) {
- check_peer_pmtu(&rt->dst, peer);
-
- if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway)
- check_peer_redir(&rt->dst, peer);
- }
-
- rt->rt_peer_genid = rt_peer_genid();
- }
-}
-
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
struct rtable *rt = (struct rtable *) dst;
if (rt_is_expired(rt))
return NULL;
- ipv4_validate_peer(rt);
return dst;
}
@@ -1716,10 +1571,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
fib_info_put(rt->fi);
rt->fi = NULL;
}
- if (rt_has_peer(rt)) {
- struct inet_peer *peer = rt_peer_ptr(rt);
- inet_putpeer(peer);
- }
}
@@ -1730,11 +1581,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
- if (rt && rt_has_peer(rt)) {
- struct inet_peer *peer = rt_peer_ptr(rt);
- if (peer_pmtu_cleaned(peer))
- dst_metric_set(&rt->dst, RTAX_MTU, peer->pmtu_orig);
- }
+ if (rt)
+ dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -1814,7 +1662,13 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
const struct rtable *rt = (const struct rtable *) dst;
- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+ unsigned int mtu = rt->rt_pmtu;
+
+ if (mtu && time_after_eq(jiffies, rt->dst.expires))
+ mtu = 0;
+
+ if (!mtu)
+ mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu && rt_is_output_route(rt))
return mtu;
@@ -1836,63 +1690,27 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
struct fib_info *fi)
{
- struct inet_peer_base *base;
- struct inet_peer *peer;
- int create = 0;
-
- /* If a peer entry exists for this destination, we must hook
- * it up in order to get at cached metrics.
- */
- if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
- create = 1;
-
- base = inetpeer_base_ptr(rt->_peer);
- BUG_ON(!base);
-
- peer = inet_getpeer_v4(base, rt->rt_dst, create);
- if (peer) {
- __rt_set_peer(rt, peer);
- rt->rt_peer_genid = rt_peer_genid();
- if (inet_metrics_new(peer))
- memcpy(peer->metrics, fi->fib_metrics,
- sizeof(u32) * RTAX_MAX);
- dst_init_metrics(&rt->dst, peer->metrics, false);
-
- check_peer_pmtu(&rt->dst, peer);
-
- if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway) {
- rt->rt_gateway = peer->redirect_learned.a4;
- rt->rt_flags |= RTCF_REDIRECTED;
- }
- } else {
- if (fi->fib_metrics != (u32 *) dst_default_metrics) {
- rt->fi = fi;
- atomic_inc(&fi->fib_clntref);
- }
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+ rt->fi = fi;
+ atomic_inc(&fi->fib_clntref);
}
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
}
static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
const struct fib_result *res,
struct fib_info *fi, u16 type, u32 itag)
{
- struct dst_entry *dst = &rt->dst;
-
if (fi) {
if (FIB_RES_GW(*res) &&
FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = FIB_RES_GW(*res);
rt_init_metrics(rt, fl4, fi);
#ifdef CONFIG_IP_ROUTE_CLASSID
- dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
+ rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
}
- if (dst_mtu(dst) > IP_MAX_MTU)
- dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
-
#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
set_class_tag(rt, fib_rules_tclass(res));
@@ -1964,9 +1782,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_iif = dev->ifindex;
rth->rt_oif = 0;
rth->rt_mark = skb->mark;
+ rth->rt_pmtu = 0;
rth->rt_gateway = daddr;
- rth->rt_peer_genid = 0;
- rt_init_peer(rth, dev_net(dev)->ipv4.peers);
rth->fi = NULL;
if (our) {
rth->dst.input= ip_local_deliver;
@@ -2090,9 +1907,8 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_iif = in_dev->dev->ifindex;
rth->rt_oif = 0;
rth->rt_mark = skb->mark;
+ rth->rt_pmtu = 0;
rth->rt_gateway = daddr;
- rth->rt_peer_genid = 0;
- rt_init_peer(rth, &res->table->tb_peers);
rth->fi = NULL;
rth->dst.input = ip_forward;
@@ -2269,9 +2085,8 @@ local_input:
rth->rt_iif = dev->ifindex;
rth->rt_oif = 0;
rth->rt_mark = skb->mark;
+ rth->rt_pmtu = 0;
rth->rt_gateway = daddr;
- rth->rt_peer_genid = 0;
- rt_init_peer(rth, net->ipv4.peers);
rth->fi = NULL;
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
@@ -2346,7 +2161,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_mark == skb->mark &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
- ipv4_validate_peer(rth);
if (noref) {
dst_use_noref(&rth->dst, jiffies);
skb_dst_set_noref(skb, &rth->dst);
@@ -2468,11 +2282,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_iif = orig_oif ? : dev_out->ifindex;
rth->rt_oif = orig_oif;
rth->rt_mark = fl4->flowi4_mark;
+ rth->rt_pmtu = 0;
rth->rt_gateway = fl4->daddr;
- rth->rt_peer_genid = 0;
- rt_init_peer(rth, (res->table ?
- &res->table->tb_peers :
- dev_net(dev_out)->ipv4.peers));
rth->fi = NULL;
RT_CACHE_STAT_INC(out_slow_tot);
@@ -2726,7 +2537,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
(IPTOS_RT_MASK | RTO_ONLINK)) &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
- ipv4_validate_peer(rth);
dst_use(&rth->dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
@@ -2790,7 +2600,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
- dst_copy_metrics(new, &ort->dst);
new->dev = ort->dst.dev;
if (new->dev)
@@ -2803,6 +2612,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_iif = ort->rt_iif;
rt->rt_oif = ort->rt_oif;
rt->rt_mark = ort->rt_mark;
+ rt->rt_pmtu = ort->rt_pmtu;
rt->rt_genid = rt_genid(net);
rt->rt_flags = ort->rt_flags;
@@ -2810,7 +2620,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_dst = ort->rt_dst;
rt->rt_src = ort->rt_src;
rt->rt_gateway = ort->rt_gateway;
- rt_transfer_peer(rt, ort);
rt->fi = ort->fi;
if (rt->fi)
atomic_inc(&rt->fi->fib_clntref);
@@ -2848,7 +2657,7 @@ static int rt_fill_info(struct net *net,
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
- u32 id = 0, ts = 0, tsage = 0, error;
+ u32 error;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
@@ -2901,21 +2710,12 @@ static int rt_fill_info(struct net *net,
goto nla_put_failure;
error = rt->dst.error;
- if (rt_has_peer(rt)) {
- const struct inet_peer *peer = rt_peer_ptr(rt);
- inet_peer_refcheck(peer);
- id = atomic_read(&peer->ip_id_count) & 0xffff;
- if (peer->tcp_ts_stamp) {
- ts = peer->tcp_ts;
- tsage = get_seconds() - peer->tcp_ts_stamp;
- }
- expires = ACCESS_ONCE(peer->pmtu_expires);
- if (expires) {
- if (time_before(jiffies, expires))
- expires -= jiffies;
- else
- expires = 0;
- }
+ expires = rt->dst.expires;
+ if (expires) {
+ if (time_before(jiffies, expires))
+ expires -= jiffies;
+ else
+ expires = 0;
}
if (rt_is_input_route(rt)) {
@@ -2944,8 +2744,7 @@ static int rt_fill_info(struct net *net,
goto nla_put_failure;
}
- if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
- expires, error) < 0)
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);