| author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-05-15 23:57:10 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-05-15 23:57:10 -0700 | |
| commit | 3b098e2d7c693796cc4dffb07caa249fc0f70771 (patch) | |
| tree | 586c4f5dc57988ade175ffc7e4b6d0261b12e166 /net/core/dev.c | |
| parent | a1aa3483041bd3691c7f029272ccef4ce70bd957 (diff) | |
net: Consistent skb timestamping
With the inclusion of RPS, skb timestamping is not consistent in the RX path.
If netif_receive_skb() is used, it is deferred until after RPS dispatch.
If netif_rx() is used, it is done before RPS dispatch.
This can produce strange tcpdump timestamp results.
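To make the asymmetry concrete, here is a condensed sketch of the two pre-patch entry points (simplified from the lines this patch removes; the RPS plumbing and protocol delivery are elided in comments):

```c
/*
 * Condensed pre-patch behavior (simplified from the removed lines in
 * the diff below; RPS plumbing and protocol delivery are elided).
 */
int netif_rx(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64)
		net_timestamp(skb);	/* stamped BEFORE RPS dispatch */

	/* ... get_rps_cpu() + enqueue_to_backlog() follow here ... */
	return NET_RX_SUCCESS;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	/* Reached only after RPS has moved the skb to the target cpu's
	 * backlog, so this stamp also includes backlog queueing delay. */
	if (!skb->tstamp.tv64)
		net_timestamp(skb);	/* stamped AFTER RPS dispatch */

	/* ... deliver to taps and protocol handlers ... */
	return NET_RX_SUCCESS;
}
```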
I think timestamping should be done as soon as possible in the receive
path, to get meaningful values (i.e. timestamps taken at the time the
packet was delivered by the NIC driver to our stack), even if NAPI can
already defer timestamping a bit (RPS can help reduce the gap).
Tom Herbert prefers to sample timestamps after RPS dispatch. In cases where
sampling is expensive (HPET/acpi_pm on x86), this makes sense.
Let admins switch from one mode to the other using a new sysctl,
/proc/sys/net/core/netdev_tstamp_prequeue.
Its default value (1) means timestamps are taken as soon as possible,
before backlog queueing, giving accurate timestamps.
Setting it to 0 defers timestamp sampling until the backlog is processed,
after RPS dispatch, lowering the load on the pre-RPS cpu.
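The knob itself is exported through the usual ctl_table machinery in net/core/sysctl_net_core.c, which this page's diffstat (limited to net/core/dev.c) does not show. A sketch of the expected entry, modeled on the existing netdev_max_backlog entry:

```c
/* Sketch of the sysctl registration (in net/core/sysctl_net_core.c,
 * outside this diffstat); modeled on the neighbouring
 * netdev_max_backlog entry. */
{
	.procname	= "netdev_tstamp_prequeue",
	.data		= &netdev_tstamp_prequeue,
	.maxlen		= sizeof(int),
	.mode		= 0644,
	.proc_handler	= proc_dointvec
},
```

At runtime, admins can then switch modes with e.g. `echo 0 > /proc/sys/net/core/netdev_tstamp_prequeue`.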
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r-- | net/core/dev.c | 50
1 file changed, 31 insertions(+), 19 deletions(-)
```diff
diff --git a/net/core/dev.c b/net/core/dev.c
index 5cbba0927a8..988e42912e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1454,7 +1454,7 @@ void net_disable_timestamp(void)
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
-static inline void net_timestamp(struct sk_buff *skb)
+static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	if (atomic_read(&netstamp_needed))
 		__net_timestamp(skb);
@@ -1462,6 +1462,12 @@ static inline void net_timestamp(struct sk_buff *skb)
 		skb->tstamp.tv64 = 0;
 }
 
+static inline void net_timestamp_check(struct sk_buff *skb)
+{
+	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
+		__net_timestamp(skb);
+}
+
 /**
  * dev_forward_skb - loopback an skb to another netif
  *
@@ -1508,9 +1514,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef CONFIG_NET_CLS_ACT
 	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
-		net_timestamp(skb);
+		net_timestamp_set(skb);
 #else
-	net_timestamp(skb);
+	net_timestamp_set(skb);
 #endif
 
 	rcu_read_lock();
@@ -2201,6 +2207,7 @@ EXPORT_SYMBOL(dev_queue_xmit);
   =======================================================================*/
 
 int netdev_max_backlog __read_mostly = 1000;
+int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
 int weight_p __read_mostly = 64;	/* old backlog weight */
 
@@ -2465,8 +2472,8 @@ int netif_rx(struct sk_buff *skb)
 	if (netpoll_rx(skb))
 		return NET_RX_DROP;
 
-	if (!skb->tstamp.tv64)
-		net_timestamp(skb);
+	if (netdev_tstamp_prequeue)
+		net_timestamp_check(skb);
 
 #ifdef CONFIG_RPS
 	{
@@ -2791,8 +2798,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	int ret = NET_RX_DROP;
 	__be16 type;
 
-	if (!skb->tstamp.tv64)
-		net_timestamp(skb);
+	if (!netdev_tstamp_prequeue)
+		net_timestamp_check(skb);
 
 	if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
 		return NET_RX_SUCCESS;
@@ -2910,23 +2917,28 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+	if (netdev_tstamp_prequeue)
+		net_timestamp_check(skb);
+
 #ifdef CONFIG_RPS
-	struct rps_dev_flow voidflow, *rflow = &voidflow;
-	int cpu, ret;
+	{
+		struct rps_dev_flow voidflow, *rflow = &voidflow;
+		int cpu, ret;
 
-	rcu_read_lock();
+		rcu_read_lock();
+
+		cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
-	cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		if (cpu >= 0) {
+			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+			rcu_read_unlock();
+		} else {
+			rcu_read_unlock();
+			ret = __netif_receive_skb(skb);
+		}
 
-	if (cpu >= 0) {
-		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-		rcu_read_unlock();
-	} else {
-		rcu_read_unlock();
-		ret = __netif_receive_skb(skb);
+		return ret;
 	}
-
-	return ret;
 #else
 	return __netif_receive_skb(skb);
 #endif
```
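Not part of the patch, but a simple way to observe the effect from userspace: the stamp taken by net_timestamp_set()/net_timestamp_check() above is what the standard SO_TIMESTAMP socket option reports. A minimal sketch (the UDP port is an arbitrary choice for illustration):

```c
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <netinet/in.h>

int main(void)
{
	struct sockaddr_in addr;
	char data[2048];
	char ctrl[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= ctrl,
		.msg_controllen	= sizeof(ctrl),
	};
	struct cmsghdr *cmsg;
	int sock, on = 1;

	sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (sock < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(5000);	/* arbitrary test port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	/* Ask the kernel to attach the RX timestamp to each datagram. */
	if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)) < 0) {
		perror("setsockopt");
		return 1;
	}

	if (recvmsg(sock, &msg, 0) < 0) {
		perror("recvmsg");
		return 1;
	}

	/* Walk the control messages looking for the SCM_TIMESTAMP stamp. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("rx timestamp: %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
	return 0;
}
```

Flipping netdev_tstamp_prequeue while such a receiver runs on an RPS-enabled system is a direct way to compare the two sampling points.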