author     Ingo Molnar <mingo@elte.hu>   2009-01-21 10:39:51 +0100
committer  Ingo Molnar <mingo@elte.hu>   2009-01-21 10:39:51 +0100
commit     198030782cedf25391e67e7c88b04f87a5eb6563 (patch)
tree       5b7368c6bf052bcb4bb273497a57900720d36f51 /net
parent     4ec71fa2d2c3f1040348f2604f4b8ccc833d1c2e (diff)
parent     92181f190b649f7ef2b79cbf5c00f26ccc66da2a (diff)
Merge branch 'x86/mm' into core/percpu

Conflicts:
	arch/x86/mm/fault.c
Diffstat (limited to 'net')
-rw-r--r--  net/9p/Kconfig | 2
-rw-r--r--  net/bridge/br_netfilter.c | 18
-rw-r--r--  net/bridge/netfilter/ebtables.c | 2
-rw-r--r--  net/can/bcm.c | 57
-rw-r--r--  net/core/dev.c | 48
-rw-r--r--  net/core/skbuff.c | 6
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 7
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 6
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 6
-rw-r--r--  net/ipv4/netfilter/iptable_security.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 5
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 2
-rw-r--r--  net/ipv4/tcp.c | 24
-rw-r--r--  net/ipv6/ip6_fib.c | 15
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 2
-rw-r--r--  net/mac80211/ht.c | 2
-rw-r--r--  net/mac80211/iface.c | 3
-rw-r--r--  net/mac80211/mesh_plink.c | 1
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 2
-rw-r--r--  net/netfilter/x_tables.c | 8
-rw-r--r--  net/netfilter/xt_time.c | 11
-rw-r--r--  net/sched/sch_htb.c | 15
-rw-r--r--  net/socket.c | 63
-rw-r--r--  net/xfrm/xfrm_user.c | 11
26 files changed, 205 insertions, 131 deletions
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 0663f99e977..7ed75c7bd5d 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -23,7 +23,7 @@ config NET_9P_VIRTIO
guest partitions and a host partition.
config NET_9P_RDMA
- depends on INET && INFINIBAND && EXPERIMENTAL
+ depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
tristate "9P RDMA Transport (Experimental)"
help
This builds support for an RDMA transport.
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a65e43a17fb..cf754ace0b7 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -58,11 +58,11 @@ static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 1;
-static int brnf_filter_pppoe_tagged __read_mostly = 1;
+static int brnf_filter_vlan_tagged __read_mostly = 0;
+static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
-#define brnf_filter_vlan_tagged 1
-#define brnf_filter_pppoe_tagged 1
+#define brnf_filter_vlan_tagged 0
+#define brnf_filter_pppoe_tagged 0
#endif
static inline __be16 vlan_proto(const struct sk_buff *skb)
@@ -686,8 +686,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
- else
+ else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+ IS_PPPOE_IPV6(skb))
pf = PF_INET6;
+ else
+ return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
@@ -828,8 +831,11 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
- else
+ else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+ IS_PPPOE_IPV6(skb))
pf = PF_INET6;
+ else
+ return NF_ACCEPT;
#ifdef CONFIG_NETFILTER_DEBUG
if (skb->dst == NULL) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8a8743d7d6e..820252aee81 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -79,7 +79,7 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
{
par->match = m->u.match;
par->matchinfo = m->data;
- return m->u.match->match(skb, par);
+ return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
}
static inline int ebt_dev_check(char *entry, const struct net_device *device)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 1649c8ab2c2..b7c7d465113 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -347,51 +347,54 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
struct bcm_op *op = (struct bcm_op *)data;
struct bcm_msg_head msg_head;
- /* create notification to user */
- msg_head.opcode = TX_EXPIRED;
- msg_head.flags = op->flags;
- msg_head.count = op->count;
- msg_head.ival1 = op->ival1;
- msg_head.ival2 = op->ival2;
- msg_head.can_id = op->can_id;
- msg_head.nframes = 0;
-
- bcm_send_to_user(op, &msg_head, NULL, 0);
-}
-
-/*
- * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
- */
-static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
-{
- struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
- enum hrtimer_restart ret = HRTIMER_NORESTART;
-
if (op->kt_ival1.tv64 && (op->count > 0)) {
op->count--;
- if (!op->count && (op->flags & TX_COUNTEVT))
- tasklet_schedule(&op->tsklet);
+ if (!op->count && (op->flags & TX_COUNTEVT)) {
+
+ /* create notification to user */
+ msg_head.opcode = TX_EXPIRED;
+ msg_head.flags = op->flags;
+ msg_head.count = op->count;
+ msg_head.ival1 = op->ival1;
+ msg_head.ival2 = op->ival2;
+ msg_head.can_id = op->can_id;
+ msg_head.nframes = 0;
+
+ bcm_send_to_user(op, &msg_head, NULL, 0);
+ }
}
if (op->kt_ival1.tv64 && (op->count > 0)) {
/* send (next) frame */
bcm_can_tx(op);
- hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
- ret = HRTIMER_RESTART;
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival1),
+ HRTIMER_MODE_ABS);
} else {
if (op->kt_ival2.tv64) {
/* send (next) frame */
bcm_can_tx(op);
- hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
- ret = HRTIMER_RESTART;
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival2),
+ HRTIMER_MODE_ABS);
}
}
+}
- return ret;
+/*
+ * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
+ */
+static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+{
+ struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+ tasklet_schedule(&op->tsklet);
+
+ return HRTIMER_NORESTART;
}
/*
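[Editor's note, not part of the commit] The net/can/bcm.c hunks above apply a
common kernel pattern: the hrtimer callback, which runs in hard-IRQ context, is
reduced to scheduling a tasklet, and the actual per-expiry work (building the
TX_EXPIRED notification, sending the next frame, re-arming the timer) moves into
the tasklet, which runs in softirq context. A minimal sketch of that pattern
with hypothetical names (my_op, my_timeout, ...), assuming the hrtimer/tasklet
APIs of this kernel generation:

	#include <linux/hrtimer.h>
	#include <linux/interrupt.h>
	#include <linux/ktime.h>

	struct my_op {
		struct hrtimer timer;
		struct tasklet_struct tsklet;
		ktime_t ival;			/* transmission interval */
	};

	/* softirq context: safe place for the heavier per-expiry work */
	static void my_tsklet_fn(unsigned long data)
	{
		struct my_op *op = (struct my_op *)data;

		/* ... build notifications, send the next frame ... */

		/* re-arm for the next period if one is configured */
		if (op->ival.tv64)
			hrtimer_start(&op->timer,
				      ktime_add(ktime_get(), op->ival),
				      HRTIMER_MODE_ABS);
	}

	/* hard-IRQ context: do nothing but kick the tasklet */
	static enum hrtimer_restart my_timeout(struct hrtimer *hrtimer)
	{
		struct my_op *op = container_of(hrtimer, struct my_op, timer);

		tasklet_schedule(&op->tsklet);
		return HRTIMER_NORESTART;
	}

	static void my_op_setup(struct my_op *op)
	{
		tasklet_init(&op->tsklet, my_tsklet_fn, (unsigned long)op);
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		op->timer.function = my_timeout;
	}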
diff --git a/net/core/dev.c b/net/core/dev.c
index b715a55cccc..8d675975d85 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2392,6 +2392,9 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
if (!(skb->dev->features & NETIF_F_GRO))
goto normal;
+ if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+ goto normal;
+
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
struct sk_buff *p;
@@ -2488,12 +2491,6 @@ EXPORT_SYMBOL(napi_gro_receive);
void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
- skb_shinfo(skb)->nr_frags = 0;
-
- skb->len -= skb->data_len;
- skb->truesize -= skb->data_len;
- skb->data_len = 0;
-
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
@@ -4434,6 +4431,45 @@ err_uninit:
}
/**
+ * init_dummy_netdev - init a dummy network device for NAPI
+ * @dev: device to init
+ *
+ * This takes a network device structure and initialize the minimum
+ * amount of fields so it can be used to schedule NAPI polls without
+ * registering a full blown interface. This is to be used by drivers
+ * that need to tie several hardware interfaces to a single NAPI
+ * poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+ /* Clear everything. Note we don't initialize spinlocks
+ * are they aren't supposed to be taken by any of the
+ * NAPI code and this dummy netdev is supposed to be
+ * only ever used for NAPI polls
+ */
+ memset(dev, 0, sizeof(struct net_device));
+
+ /* make sure we BUG if trying to hit standard
+ * register/unregister code path
+ */
+ dev->reg_state = NETREG_DUMMY;
+
+ /* initialize the ref count */
+ atomic_set(&dev->refcnt, 1);
+
+ /* NAPI wants this */
+ INIT_LIST_HEAD(&dev->napi_list);
+
+ /* a dummy interface is started by default */
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+ set_bit(__LINK_STATE_START, &dev->state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
+
+
+/**
* register_netdev - register a network device
* @dev: device to register
*
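[Editor's note, not part of the commit] init_dummy_netdev(), added above, lets a
driver hang a napi_struct off a net_device that is never registered, for example
when several hardware channels share one poll routine. A sketch of typical usage
with hypothetical names (my_adapter, my_poll), assuming the netif_napi_add()
signature of this kernel generation:

	#include <linux/netdevice.h>

	struct my_adapter {
		struct net_device napi_dev;	/* dummy, never registered */
		struct napi_struct napi;
	};

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		/* ... service completions from all channels, count them ... */
		if (done < budget)
			napi_complete(napi);
		return done;
	}

	static void my_adapter_init(struct my_adapter *adap)
	{
		init_dummy_netdev(&adap->napi_dev);
		netif_napi_add(&adap->napi_dev, &adap->napi, my_poll, 64);
		napi_enable(&adap->napi);
	}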
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5110b359c75..65eac773903 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2602,6 +2602,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+ skb_shinfo(skb)->nr_frags = 0;
+
+ skb->truesize -= skb->data_len;
+ skb->len -= skb->data_len;
+ skb->data_len = 0;
+
NAPI_GRO_CB(skb)->free = 1;
goto done;
}
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c9224310eba..52cb6939d09 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -93,13 +93,8 @@ ipt_local_out_hook(unsigned int hook,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("iptable_filter: ignoring short SOCK_RAW "
- "packet.\n");
+ ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
-
return ipt_do_table(skb, hook, in, out,
dev_net(out)->ipv4.iptable_filter);
}
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 69f2c428714..3929d20b9e4 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -132,12 +132,8 @@ ipt_local_hook(unsigned int hook,
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr)
- || ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("iptable_mangle: ignoring short SOCK_RAW "
- "packet.\n");
+ || ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
/* Save things which could affect route */
mark = skb->mark;
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 8faebfe638f..7f65d18333e 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -65,12 +65,8 @@ ipt_local_hook(unsigned int hook,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("iptable_raw: ignoring short SOCK_RAW "
- "packet.\n");
+ ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
return ipt_do_table(skb, hook, in, out,
dev_net(out)->ipv4.iptable_raw);
}
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 36f3be3cc42..a52a35f4a58 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -96,12 +96,8 @@ ipt_local_out_hook(unsigned int hook,
{
/* Somebody is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr)
- || ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk(KERN_INFO "iptable_security: ignoring short "
- "SOCK_RAW packet.\n");
+ || ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
return ipt_do_table(skb, hook, in, out,
dev_net(out)->ipv4.iptable_security);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index b2141e11575..4beb04fac58 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -145,11 +145,8 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("ipt_hook: happy cracking.\n");
+ ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1fd3ef7718b..2a8bee26f43 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -20,7 +20,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_log.h>
-static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ce572f9dff0..0cd71b84e48 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -522,8 +522,12 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{
struct tcp_splice_state *tss = rd_desc->arg.data;
+ int ret;
- return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
+ ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+ if (ret > 0)
+ rd_desc->count -= ret;
+ return ret;
}
static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
@@ -531,6 +535,7 @@ static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
/* Store TCP splice context information in read_descriptor_t. */
read_descriptor_t rd_desc = {
.arg.data = tss,
+ .count = tss->len,
};
return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
@@ -611,11 +616,13 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
tss.len -= ret;
spliced += ret;
+ if (!timeo)
+ break;
release_sock(sk);
lock_sock(sk);
if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
- (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current))
break;
}
@@ -2382,7 +2389,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
unsigned int seq;
__be32 delta;
unsigned int oldlen;
- unsigned int len;
+ unsigned int mss;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
@@ -2398,10 +2405,13 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
oldlen = (u16)~skb->len;
__skb_pull(skb, thlen);
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
- int mss;
if (unlikely(type &
~(SKB_GSO_TCPV4 |
@@ -2412,7 +2422,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
!(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
goto out;
- mss = skb_shinfo(skb)->gso_size;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
@@ -2423,8 +2432,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
if (IS_ERR(segs))
goto out;
- len = skb_shinfo(skb)->gso_size;
- delta = htonl(oldlen + (thlen + len));
+ delta = htonl(oldlen + (thlen + mss));
skb = segs;
th = tcp_hdr(skb);
@@ -2440,7 +2448,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
csum_fold(csum_partial(skb_transport_header(skb),
thlen, skb->csum));
- seq += len;
+ seq += mss;
skb = skb->next;
th = tcp_hdr(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 29c7c99e69f..52ee1dced2f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
struct fib6_walker_t *w = (void*)cb->args[2];
if (w) {
+ if (cb->args[4]) {
+ cb->args[4] = 0;
+ fib6_walker_unlink(w);
+ }
cb->args[2] = 0;
kfree(w);
}
@@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
read_lock_bh(&table->tb6_lock);
res = fib6_walk_continue(w);
read_unlock_bh(&table->tb6_lock);
- if (res != 0) {
- if (res < 0)
- fib6_walker_unlink(w);
- goto end;
+ if (res <= 0) {
+ fib6_walker_unlink(w);
+ cb->args[4] = 0;
}
- fib6_walker_unlink(w);
- cb->args[4] = 0;
}
-end:
+
return res;
}
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index bd52151d31e..c455cf4ee75 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -26,7 +26,7 @@
#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>
-static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 5f510a13b9f..c5c0c527109 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -469,7 +469,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
struct ieee80211_sub_if_data *sdata;
u16 start_seq_num;
u8 *state;
- int ret;
+ int ret = 0;
if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
return -EINVAL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5abbc3f07dd..b9074824862 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -699,7 +699,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
return 0;
/* Setting ad-hoc mode on non-IBSS channel is not supported. */
- if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)
+ if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
+ type == NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
/*
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 929ba542fd7..1159bdb4119 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -107,6 +107,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
sta->flags = WLAN_STA_AUTHORIZED;
sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+ rate_control_rate_init(sta);
return sta;
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 2b3b490a607..3824990d340 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -395,13 +395,15 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
{
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
- struct minstrel_rate *mr_ctl;
+ struct ieee80211_local *local = hw_to_local(mp->hw);
+ struct ieee80211_rate *ctl_rate;
unsigned int i, n = 0;
unsigned int t_slot = 9; /* FIXME: get real slot time */
mi->lowest_rix = rate_lowest_index(sband, sta);
- mr_ctl = &mi->r[rix_to_ndx(mi, mi->lowest_rix)];
- mi->sp_ack_dur = mr_ctl->ack_time;
+ ctl_rate = &sband->bitrates[mi->lowest_rix];
+ mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
+ !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
for (i = 0; i < sband->n_bitrates; i++) {
struct minstrel_rate *mr = &mi->r[n];
@@ -416,7 +418,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
mr->rix = i;
mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(mi, hw_to_local(mp->hw), mr,
+ calc_rate_durations(mi, local, mr,
&sband->bitrates[i]);
/* calculate maximum number of retransmissions before
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7e83f74cd5d..90ce9ddb945 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -469,7 +469,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
{
- struct nf_conn *ct = NULL;
+ struct nf_conn *ct;
if (unlikely(!nf_conntrack_hash_rnd_initted)) {
get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -551,7 +551,7 @@ init_conntrack(struct net *net,
}
ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
- if (ct == NULL || IS_ERR(ct)) {
+ if (IS_ERR(ct)) {
pr_debug("Can't allocate conntrack.\n");
return (struct nf_conntrack_tuple_hash *)ct;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 00e8c27130f..3dddec6d2f7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1134,7 +1134,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
struct nf_conntrack_helper *helper;
ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
- if (ct == NULL || IS_ERR(ct))
+ if (IS_ERR(ct))
return -ENOMEM;
if (!cda[CTA_TIMEOUT])
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 89837a4eef7..bfbf521f6ea 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -273,6 +273,10 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
have_rev = 1;
}
}
+
+ if (af != NFPROTO_UNSPEC && !have_rev)
+ return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
return have_rev;
}
@@ -289,6 +293,10 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
have_rev = 1;
}
}
+
+ if (af != NFPROTO_UNSPEC && !have_rev)
+ return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
return have_rev;
}
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 29375ba8db7..93acaa59d10 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -243,6 +243,17 @@ static struct xt_match xt_time_mt_reg __read_mostly = {
static int __init time_mt_init(void)
{
+ int minutes = sys_tz.tz_minuteswest;
+
+ if (minutes < 0) /* east of Greenwich */
+ printk(KERN_INFO KBUILD_MODNAME
+ ": kernel timezone is +%02d%02d\n",
+ -minutes / 60, -minutes % 60);
+ else /* west of Greenwich */
+ printk(KERN_INFO KBUILD_MODNAME
+ ": kernel timezone is -%02d%02d\n",
+ minutes / 60, minutes % 60);
+
return xt_register_match(&xt_time_mt_reg);
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5070643ce53..2f0f0b04d3f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -661,12 +661,13 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
* next pending event (0 for no event in pq).
* Note: Applied are events whose have cl->pq_key <= q->now.
*/
-static psched_time_t htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level,
+ unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
1 to simplify things when jiffy is going to be incremented
too soon */
- unsigned long stop_at = jiffies + 2;
+ unsigned long stop_at = start + 2;
while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
long diff;
@@ -685,8 +686,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
- /* too much load - let's continue on next jiffie */
- return q->now + PSCHED_TICKS_PER_SEC / HZ;
+ /* too much load - let's continue on next jiffie (including above) */
+ return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
}
/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -845,6 +846,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
+ unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
@@ -857,6 +859,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (!sch->q.qlen)
goto fin;
q->now = psched_get_time();
+ start_at = jiffies;
next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
@@ -866,14 +869,14 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
psched_time_t event;
if (q->now >= q->near_ev_cache[level]) {
- event = htb_do_events(q, level);
+ event = htb_do_events(q, level, start_at);
if (!event)
event = q->now + PSCHED_TICKS_PER_SEC;
q->near_ev_cache[level] = event;
} else
event = q->near_ev_cache[level];
- if (event && next_event > event)
+ if (next_event > event)
next_event = event;
m = ~q->row_mask[level];
diff --git a/net/socket.c b/net/socket.c
index 06603d73c41..35dd7371752 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1214,7 +1214,7 @@ int sock_create_kern(int family, int type, int protocol, struct socket **res)
return __sock_create(&init_net, family, type, protocol, res, 1);
}
-asmlinkage long sys_socket(int family, int type, int protocol)
+SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
int retval;
struct socket *sock;
@@ -1255,8 +1255,8 @@ out_release:
* Create a pair of connected sockets.
*/
-asmlinkage long sys_socketpair(int family, int type, int protocol,
- int __user *usockvec)
+SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+ int __user *, usockvec)
{
struct socket *sock1, *sock2;
int fd1, fd2, err;
@@ -1356,7 +1356,7 @@ out_fd1:
* the protocol layer (having also checked the address is ok).
*/
-asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
+SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1385,7 +1385,7 @@ asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
* ready for listening.
*/
-asmlinkage long sys_listen(int fd, int backlog)
+SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
struct socket *sock;
int err, fput_needed;
@@ -1418,8 +1418,8 @@ asmlinkage long sys_listen(int fd, int backlog)
* clean when we restucture accept also.
*/
-asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags)
+SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen, int, flags)
{
struct socket *sock, *newsock;
struct file *newfile;
@@ -1502,8 +1502,8 @@ out_fd:
goto out_put;
}
-asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen)
+SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen)
{
return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}
@@ -1520,8 +1520,8 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
* include the -EINPROGRESS status for such sockets.
*/
-asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr,
- int addrlen)
+SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ int, addrlen)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1552,8 +1552,8 @@ out:
* name to user space.
*/
-asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len)
+SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1583,8 +1583,8 @@ out:
* name to user space.
*/
-asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len)
+SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1615,9 +1615,9 @@ asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
* the protocol.
*/
-asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
- unsigned flags, struct sockaddr __user *addr,
- int addr_len)
+SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int, addr_len)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1660,7 +1660,8 @@ out:
* Send a datagram down a socket.
*/
-asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
+SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags)
{
return sys_sendto(fd, buff, len, flags, NULL, 0);
}
@@ -1671,9 +1672,9 @@ asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
* sender address from kernel to user space.
*/
-asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
- unsigned flags, struct sockaddr __user *addr,
- int __user *addr_len)
+SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int __user *, addr_len)
{
struct socket *sock;
struct iovec iov;
@@ -1725,8 +1726,8 @@ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
* to pass the user mode parameter for the protocols to sort out.
*/
-asmlinkage long sys_setsockopt(int fd, int level, int optname,
- char __user *optval, int optlen)
+SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int, optlen)
{
int err, fput_needed;
struct socket *sock;
@@ -1759,8 +1760,8 @@ out_put:
* to pass a user mode parameter for the protocols to sort out.
*/
-asmlinkage long sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen)
+SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int __user *, optlen)
{
int err, fput_needed;
struct socket *sock;
@@ -1789,7 +1790,7 @@ out_put:
* Shutdown a socket.
*/
-asmlinkage long sys_shutdown(int fd, int how)
+SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
int err, fput_needed;
struct socket *sock;
@@ -1815,7 +1816,7 @@ asmlinkage long sys_shutdown(int fd, int how)
* BSD sendmsg interface
*/
-asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -1921,8 +1922,8 @@ out:
* BSD recvmsg interface
*/
-asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg,
- unsigned int flags)
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+ unsigned int, flags)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -2045,7 +2046,7 @@ static const unsigned char nargs[19]={
* it is set by the callees.
*/
-asmlinkage long sys_socketcall(int call, unsigned long __user *args)
+SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
{
unsigned long a[6];
unsigned long a0, a1;
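[Editor's note, not part of the commit] The net/socket.c hunks convert the
open-coded asmlinkage prototypes to the SYSCALL_DEFINEn() macros from
<linux/syscalls.h>. On architectures without syscall wrappers the macro boils
down to the old prototype; with CONFIG_HAVE_SYSCALL_WRAPPERS it additionally
emits a wrapper that takes every argument as long and casts it down, so 32-bit
arguments arrive properly sign-extended on 64-bit architectures. A simplified
sketch of the expansion for sys_socket (not the exact macro text):

	#ifndef CONFIG_HAVE_SYSCALL_WRAPPERS
	/* SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
	 * is essentially the old open-coded form */
	asmlinkage long sys_socket(int family, int type, int protocol)
	{
		/* ... body as before ... */
	}
	#else
	/* with wrappers: a long-typed entry point plus the real body */
	static inline long SYSC_socket(int family, int type, int protocol);
	asmlinkage long SyS_socket(long family, long type, long protocol)
	{
		return SYSC_socket((int)family, (int)type, (int)protocol);
	}
	SYSCALL_ALIAS(sys_socket, SyS_socket);
	static inline long SYSC_socket(int family, int type, int protocol)
	{
		/* ... body as before ... */
	}
	#endif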
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b95a2d64eb5..7877e7975da 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1914,10 +1914,17 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
}
#endif
+/* For the xfrm_usersa_info cases we have to work around some 32-bit vs.
+ * 64-bit compatability issues. On 32-bit the structure is 220 bytes, but
+ * for 64-bit it gets padded out to 224 bytes. Those bytes are just
+ * padding and don't have any content we care about. Therefore as long
+ * as we have enough bytes for the content we can make both cases work.
+ */
+
#define XMSGSIZE(type) sizeof(struct type)
static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
- [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+ [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = 220, /* see above */
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
@@ -1927,7 +1934,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
- [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+ [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = 220, /* see above */
[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,