From eb409460b1abec0e2a1f9c9d07019f4157a6d6bc Mon Sep 17 00:00:00 2001
From: Lijun Chen
Date: Mon, 16 Oct 2006 21:59:42 -0700
Subject: [TIPC]: Added subscription cancellation capability

This patch allows a TIPC application to cancel an existing topology service
subscription by re-requesting the subscription with the TIPC_SUB_CANCEL
filter bit set. (All other bits of the cancel request must match the
original subscription request.)

Signed-off-by: Allan Stephens
Signed-off-by: Per Liden
Signed-off-by: David S. Miller
---
 include/linux/tipc.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'include')

diff --git a/include/linux/tipc.h b/include/linux/tipc.h
index 243a15f5400..bea469455a0 100644
--- a/include/linux/tipc.h
+++ b/include/linux/tipc.h
@@ -129,6 +129,7 @@ static inline unsigned int tipc_node(__u32 addr)
 
 #define TIPC_SUB_PORTS		0x01	/* filter for port availability */
 #define TIPC_SUB_SERVICE	0x02	/* filter for service availability */
+#define TIPC_SUB_CANCEL		0x04	/* cancel a subscription */
 #if 0
 /* The following filter options are not currently implemented */
 #define TIPC_SUB_NO_BIND_EVTS	0x04	/* filter out "publish" events */
--
cgit v1.2.3-70-g09d2
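A cancel request is just the original subscription resent with one extra
filter bit, so the application-side code is small. As a rough sketch (not
part of the patch: the helper name and the pre-established connection to
the topology server are assumed), cancelling could look like this:

	/* Hypothetical helper: cancel an active topology subscription.
	 * 'topsrv_sd' is an already-connected socket to the TIPC topology
	 * server; 'orig' is a saved copy of the original request, since
	 * every bit other than TIPC_SUB_CANCEL must match it.
	 */
	#include <sys/socket.h>
	#include <linux/tipc.h>

	static int cancel_subscr(int topsrv_sd, const struct tipc_subscr *orig)
	{
		struct tipc_subscr cancel = *orig;

		cancel.filter |= TIPC_SUB_CANCEL;
		if (send(topsrv_sd, &cancel, sizeof(cancel), 0) !=
		    (ssize_t)sizeof(cancel))
			return -1;
		return 0;
	}

On a match, the topology server withdraws the existing subscription rather
than installing a duplicate.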
From e320af1df4c47305e829e8e1a40e5fad0e5e9fba Mon Sep 17 00:00:00 2001
From: Ville Nuorvala
Date: Mon, 16 Oct 2006 22:05:55 -0700
Subject: [IPV6]: Remove struct pol_chain.

Struct pol_chain has existed since at least the 2.2 kernel, but isn't used
anymore. As IPv6 policy routing is implemented in a totally different way
in the current kernel, just get rid of it.

Signed-off-by: Ville Nuorvala
Signed-off-by: David S. Miller
---
 include/net/ip6_route.h | 7 -------
 1 file changed, 7 deletions(-)
(limited to 'include')

diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 6ca6b71dfe0..c14b70ed4c5 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -36,13 +36,6 @@ struct route_info {
 #define RT6_LOOKUP_F_REACHABLE	0x2
 #define RT6_LOOKUP_F_HAS_SADDR	0x4
 
-struct pol_chain {
-	int			type;
-	int			priority;
-	struct fib6_node	*rules;
-	struct pol_chain	*next;
-};
-
 extern struct rt6_info	ip6_null_entry;
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
--
cgit v1.2.3-70-g09d2

From b52f070c9c3c09ed3b7f699280193aae7e25d816 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Wed, 18 Oct 2006 20:26:36 -0700
Subject: [IPv4] fib: Remove unused fib_config members

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller
---
 include/net/ip_fib.h    | 5 +----
 net/ipv4/fib_frontend.c | 5 -----
 2 files changed, 1 insertion(+), 9 deletions(-)
(limited to 'include')

diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 82229146bac..949b932d2f0 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -21,17 +21,14 @@
 #include
 
 struct fib_config {
-	u8			fc_family;
 	u8			fc_dst_len;
-	u8			fc_src_len;
 	u8			fc_tos;
 	u8			fc_protocol;
 	u8			fc_scope;
 	u8			fc_type;
-	/* 1 byte unused */
+	/* 3 bytes unused */
 	u32			fc_table;
 	__be32			fc_dst;
-	__be32			fc_src;
 	__be32			fc_gw;
 	int			fc_oif;
 	u32			fc_flags;

diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 9c399a70dd5..af0190d8b6c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -482,9 +482,7 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 	memset(cfg, 0, sizeof(*cfg));
 
 	rtm = nlmsg_data(nlh);
-	cfg->fc_family = rtm->rtm_family;
 	cfg->fc_dst_len = rtm->rtm_dst_len;
-	cfg->fc_src_len = rtm->rtm_src_len;
 	cfg->fc_tos = rtm->rtm_tos;
 	cfg->fc_table = rtm->rtm_table;
 	cfg->fc_protocol = rtm->rtm_protocol;
@@ -501,9 +499,6 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 		case RTA_DST:
 			cfg->fc_dst = nla_get_be32(attr);
 			break;
-		case RTA_SRC:
-			cfg->fc_src = nla_get_be32(attr);
-			break;
 		case RTA_OIF:
 			cfg->fc_oif = nla_get_u32(attr);
 			break;
--
cgit v1.2.3-70-g09d2

From ae8064ac32d07f609114d73928cdef803be87134 Mon Sep 17 00:00:00 2001
From: John Heffner
Date: Wed, 18 Oct 2006 20:36:48 -0700
Subject: [TCP]: Bound TSO defer time

This patch limits the amount of time you will defer sending a TSO segment
to less than two clock ticks, or the time between two ACKs, whichever is
longer.

On slow links, deferring causes significant bursts. See the attached plots,
which show RTT through a 1 Mbps link with a 100 ms RTT and a ~100 ms queue
for (a) non-TSO, (b) current TSO, and (c) patched TSO. This burstiness
causes significant jitter, tends to overflow queues early (bad for short
queues), and makes delay-based congestion control more difficult.

I believe deferring by a couple of clock ticks will have a relatively small
impact on performance.

Signed-off-by: John Heffner
Signed-off-by: David S. Miller
---
 include/linux/tcp.h   |  2 ++
 net/ipv4/tcp_output.c | 20 +++++++++++++++-----
 2 files changed, 17 insertions(+), 5 deletions(-)
(limited to 'include')

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 0e058a2d1c6..2d36f6db370 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -342,6 +342,8 @@ struct tcp_sock {
 
 	unsigned long last_synq_overflow; 
 
+	__u32	tso_deferred;
+
 	/* Receiver side RTT estimation */
 	struct {
 		__u32	rtt;

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f22536e32cb..ca406157724 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1096,10 +1096,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 	u32 send_win, cong_win, limit, in_flight;
 
 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
-		return 0;
+		goto send_now;
 
 	if (icsk->icsk_ca_state != TCP_CA_Open)
-		return 0;
+		goto send_now;
+
+	/* Defer for less than two clock ticks. */
+	if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
+		goto send_now;
 
 	in_flight = tcp_packets_in_flight(tp);
 
@@ -1115,7 +1119,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 
 	/* If a full-sized TSO skb can be sent, do it. */
 	if (limit >= 65536)
-		return 0;
+		goto send_now;
 
 	if (sysctl_tcp_tso_win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1125,7 +1129,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 		 */
 		chunk /= sysctl_tcp_tso_win_divisor;
 		if (limit >= chunk)
-			return 0;
+			goto send_now;
 	} else {
 		/* Different approach, try not to defer past a single
 		 * ACK.  Receiver should ACK every other full sized
@@ -1133,11 +1137,17 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 		 * then send now.
 		 */
 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
-			return 0;
+			goto send_now;
 	}
 
 	/* Ok, it looks like it is advisable to defer.  */
+	tp->tso_deferred = 1 | (jiffies<<1);
+
 	return 1;
+
+send_now:
+	tp->tso_deferred = 0;
+	return 0;
 }
 
 /* Create a new MTU probe if we are ready.
--
cgit v1.2.3-70-g09d2
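The two-tick bound relies on how tp->tso_deferred is encoded: on deferral,
jiffies is shifted left one bit and the low bit is forced to 1, so a stored
value of zero always means "no deferral in progress" even when jiffies
itself is zero, and the ((jiffies<<1)>>1) round trip reduces both sides of
the age comparison to the same 31 bits. A standalone sketch of the
arithmetic (illustrative names, not kernel code):

	#include <assert.h>

	typedef unsigned int u32;

	/* Bit 0 flags "deferral in progress"; bits 1..31 hold the tick
	 * count at the time of deferral. The flag keeps the stored value
	 * non-zero even when jiffies is 0. */
	static u32 defer_stamp(u32 jiffies)
	{
		return 1 | (jiffies << 1);
	}

	/* Age of a deferral in ticks, computed modulo 2^31. */
	static u32 defer_age(u32 stamp, u32 jiffies)
	{
		return ((jiffies << 1) >> 1) - (stamp >> 1);
	}

	int main(void)
	{
		u32 stamp = defer_stamp(1000);

		assert(defer_age(stamp, 1001) == 1);	/* within the bound */
		assert(defer_age(stamp, 1002) == 2);	/* > 1 tick old */
		return 0;
	}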
From 78d79423179c0efc7ec34b55d287e7be4ca07da6 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 20 Oct 2006 00:28:35 -0700
Subject: [IPV4] inet_peer: Group together avl_left, avl_right, v4daddr to
 speedup lookups on some CPUS

Lots of routers/embedded devices still use CPUs with 16/32 byte cache lines
(486, Pentium, ... PIII). It makes sense to group together the fields used
at lookup time so they fit in one cache line. This reduces the cache
footprint and speeds up lookups.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/net/inetpeer.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
(limited to 'include')

diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index f13cc0c2b16..aa10a8178e7 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -17,14 +17,15 @@
 
 struct inet_peer
 {
+	/* group together avl_left,avl_right,v4daddr to speedup lookups */
 	struct inet_peer	*avl_left, *avl_right;
+	__be32			v4daddr;	/* peer's address */
+	__u16			avl_height;
+	__u16			ip_id_count;	/* IP ID for the next packet */
 	struct inet_peer	*unused_next, **unused_prevp;
 	__u32			dtime;		/* the time of last use of not
 						 * referenced entries */
 	atomic_t		refcnt;
-	__be32			v4daddr;	/* peer's address */
-	__u16			avl_height;
-	__u16			ip_id_count;	/* IP ID for the next packet */
 	atomic_t		rid;		/* Frag reception counter */
 	__u32			tcp_ts;
 	unsigned long		tcp_ts_stamp;
--
cgit v1.2.3-70-g09d2
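On a 32-bit target the regrouped fields are two 4-byte pointers, a 4-byte
address, and two 2-byte counters: 16 bytes with no padding, exactly one
cache line on the CPUs mentioned above. A quick offsetof() sanity check of
such a layout (a sketch with stand-in types, not the kernel header):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the reordered inet_peer: hot lookup fields first. */
	struct peer {
		struct peer	*avl_left, *avl_right;
		uint32_t	v4daddr;
		uint16_t	avl_height;
		uint16_t	ip_id_count;
		/* colder, less frequently used fields follow */
		uint32_t	dtime;
	};

	int main(void)
	{
		size_t hot_end = offsetof(struct peer, ip_id_count) +
				 sizeof(uint16_t);

		/* Prints 16 on a 32-bit target: the lookup fields fill one
		 * 16-byte cache line (half of a 32-byte line). */
		printf("hot fields end at byte %zu\n", hot_end);
		return 0;
	}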