Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 4
-rw-r--r--  net/8021q/vlan_dev.c | 3
-rw-r--r--  net/bluetooth/bnep/netdev.c | 2
-rw-r--r--  net/bridge/br_fdb.c | 6
-rw-r--r--  net/bridge/br_forward.c | 4
-rw-r--r--  net/caif/cfrfml.c | 2
-rw-r--r--  net/caif/cfserl.c | 6
-rw-r--r--  net/caif/cfveil.c | 2
-rw-r--r--  net/core/dev.c | 33
-rw-r--r--  net/core/gen_estimator.c | 15
-rw-r--r--  net/core/pktgen.c | 2
-rw-r--r--  net/core/skbuff.c | 44
-rw-r--r--  net/ipv4/Kconfig | 10
-rw-r--r--  net/ipv4/ip_output.c | 9
-rw-r--r--  net/ipv4/ipmr.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 2
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/tcp_hybla.c | 4
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 7
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 6
-rw-r--r--  net/ipv6/mcast.c | 5
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/mac80211/agg-tx.c | 6
-rw-r--r--  net/mac80211/chan.c | 2
-rw-r--r--  net/mac80211/driver-ops.h | 2
-rw-r--r--  net/mac80211/mlme.c | 92
-rw-r--r--  net/mac80211/rx.c | 16
-rw-r--r--  net/mac80211/work.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 4
-rw-r--r--  net/netfilter/x_tables.c | 17
-rw-r--r--  net/phonet/pep.c | 6
-rw-r--r--  net/rds/ib_cm.c | 1
-rw-r--r--  net/rds/iw_cm.c | 1
-rw-r--r--  net/sched/act_nat.c | 4
-rw-r--r--  net/sched/act_pedit.c | 24
-rw-r--r--  net/sched/cls_u32.c | 49
-rw-r--r--  net/sched/sch_teql.c | 1
-rw-r--r--  net/sunrpc/xprtsock.c | 38
-rw-r--r--  net/xfrm/xfrm_output.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
45 files changed, 324 insertions, 139 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc1025..50f58f5f1c3 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
return NET_RX_DROP;
if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
- goto drop;
+ skb->deliver_no_wcard = 1;
skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
struct sk_buff *p;
if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
- goto drop;
+ skb->deliver_no_wcard = 1;
skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5..52984267781 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
netif_carrier_off(dev);
/* IFF_BROADCAST|IFF_MULTICAST; ??? */
- dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ IFF_MASTER | IFF_SLAVE);
dev->iflink = real_dev->ifindex;
dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
(1<<__LINK_STATE_DORMANT))) |
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 0faad5ce6dc..8c100c9dae2 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -104,6 +104,8 @@ static void bnep_net_set_mc_list(struct net_device *dev)
break;
memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+
+ i++;
}
r->len = htons(skb->len - len);
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 26637439965..b01dde35a69 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,7 @@ void br_fdb_cleanup(unsigned long _data)
{
struct net_bridge *br = (struct net_bridge *)_data;
unsigned long delay = hold_time(br);
- unsigned long next_timer = jiffies + br->forward_delay;
+ unsigned long next_timer = jiffies + br->ageing_time;
int i;
spin_lock_bh(&br->hash_lock);
@@ -149,9 +149,7 @@ void br_fdb_cleanup(unsigned long _data)
}
spin_unlock_bh(&br->hash_lock);
- /* Add HZ/4 to ensure we round the jiffies upwards to be after the next
- * timer, otherwise we might round down and will have no-op run. */
- mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
+ mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
}
/* Completely flush all dynamic entries in forwarding database.*/
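The br_fdb change replaces round_jiffies(next_timer + HZ/4) with round_jiffies_up(next_timer): round-to-nearest can land before the computed expiry, turning the next garbage-collection run into a no-op, which the old HZ/4 fudge only papered over. A minimal userspace sketch of the two rounding behaviours (plain integer arithmetic, not the kernel helpers):

#include <stdio.h>

#define HZ 1000  /* pretend one second == 1000 jiffies */

/* Round to the nearest whole second (roughly what round_jiffies() does). */
static unsigned long round_nearest(unsigned long j)
{
        return ((j + HZ / 2) / HZ) * HZ;
}

/* Round up to the next whole second (what round_jiffies_up() guarantees). */
static unsigned long round_up_sec(unsigned long j)
{
        return ((j + HZ - 1) / HZ) * HZ;
}

int main(void)
{
        unsigned long next_timer = 5200;  /* deadline 0.2s into second 5 */

        /* Nearest rounding can move the timer *before* the deadline... */
        printf("nearest: %lu (fires %s deadline)\n", round_nearest(next_timer),
               round_nearest(next_timer) < next_timer ? "before" : "at/after");
        /* ...while rounding up never does. */
        printf("up:      %lu (fires %s deadline)\n", round_up_sec(next_timer),
               round_up_sec(next_timer) < next_timer ? "before" : "at/after");
        return 0;
}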
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a98ef139309..a4e72a89e4f 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -140,10 +140,10 @@ static int deliver_clone(const struct net_bridge_port *prev,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
- struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
-
dev->stats.tx_dropped++;
return -ENOMEM;
}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec93..fd27b172fb5 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (!cfsrvl_ready(service, &ret))
return ret;
- if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
pr_err("CAIF: %s():Packet too large - size=%d\n",
__func__, cfpkt_getlen(pkt));
return -EOVERFLOW;
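This cfrfml hunk (and the identical cfveil one below) fixes an operator-precedence bug: ! binds tighter than >, so !cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE compares 0 or 1 against the limit and the oversize check never triggers. A standalone sketch of the same mistake (illustrative names, not the CAIF code):

#include <stdio.h>

#define MAX_PAYLOAD 4096

/* Buggy: '!' binds tighter than '>', so this compares (!len), i.e. 0 or 1,
 * against MAX_PAYLOAD -- always false for any positive limit. */
static int too_large_buggy(unsigned int len)
{
        return !len > MAX_PAYLOAD;
}

/* Fixed: compare the length itself against the limit. */
static int too_large_fixed(unsigned int len)
{
        return len > MAX_PAYLOAD;
}

int main(void)
{
        unsigned int len = 65536;       /* clearly oversized */

        printf("buggy check: %d\n", too_large_buggy(len));   /* prints 0 */
        printf("fixed check: %d\n", too_large_fixed(len));   /* prints 1 */
        return 0;
}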
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc8..965c5baace4 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
u8 stx = CFSERL_STX;
int ret;
u16 expectlen = 0;
+
caif_assert(newpkt != NULL);
spin_lock(&layr->sync);
if (layr->incomplete_frm != NULL) {
-
layr->incomplete_frm =
cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
pkt = layr->incomplete_frm;
- if (pkt == NULL)
+ if (pkt == NULL) {
+ spin_unlock(&layr->sync);
return -ENOMEM;
+ }
} else {
pkt = newpkt;
}
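The cfserl fix releases layr->sync before the early -ENOMEM return; without it the spinlock stays held forever and the next receive deadlocks. A pthread sketch of the pattern (hypothetical receive(), not the CAIF code):

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every early return taken while the lock is held must drop the lock
 * first, or the next caller blocks forever. */
static int receive(void *pkt)
{
        pthread_mutex_lock(&sync_lock);

        if (pkt == NULL) {
                /* Error path: unlock before returning (the bug was a bare
                 * "return -ENOMEM;" here, leaving the lock held). */
                pthread_mutex_unlock(&sync_lock);
                return -ENOMEM;
        }

        /* ... normal processing under the lock ... */

        pthread_mutex_unlock(&sync_lock);
        return 0;
}

int main(void)
{
        printf("first call:  %d\n", receive(NULL));
        printf("second call: %d\n", receive("pkt")); /* would hang with the bug */
        return 0;
}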
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f4949..e04f7d964e8 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
return ret;
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
pr_warning("CAIF: %s(): Packet too large - size=%d\n",
__func__, cfpkt_getlen(pkt));
return -EOVERFLOW;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1845b08c624..2b3bf53bc68 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->num_rx_queues)) {
- if (net_ratelimit()) {
- pr_warning("%s received packet on queue "
- "%u, but number of RX queues is %u\n",
- dev->name, index, dev->num_rx_queues);
- }
+ WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+ "on queue %u, but number of RX queues is %u\n",
+ dev->name, index, dev->num_rx_queues);
goto done;
}
rxqueue = dev->_rx + index;
@@ -2795,7 +2793,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
struct net_device *orig_dev;
struct net_device *master;
struct net_device *null_or_orig;
- struct net_device *null_or_bond;
+ struct net_device *orig_or_bond;
int ret = NET_RX_DROP;
__be16 type;
@@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
if (!skb->skb_iif)
skb->skb_iif = skb->dev->ifindex;
+ /*
+ * bonding note: skbs received on inactive slaves should only
+ * be delivered to pkt handlers that are exact matches. Also
+ * the deliver_no_wcard flag will be set. If packet handlers
+ * are sensitive to duplicate packets these skbs will need to
+ * be dropped at the handler. The vlan accel path may have
+ * already set the deliver_no_wcard flag.
+ */
null_or_orig = NULL;
orig_dev = skb->dev;
master = ACCESS_ONCE(orig_dev->master);
- if (master) {
- if (skb_bond_should_drop(skb, master))
+ if (skb->deliver_no_wcard)
+ null_or_orig = orig_dev;
+ else if (master) {
+ if (skb_bond_should_drop(skb, master)) {
+ skb->deliver_no_wcard = 1;
null_or_orig = orig_dev; /* deliver only exact match */
- else
+ } else
skb->dev = master;
}
@@ -2868,10 +2877,10 @@ ncls:
* device that may have registered for a specific ptype. The
* handler may have to adjust skb->dev and orig_dev.
*/
- null_or_bond = NULL;
+ orig_or_bond = orig_dev;
if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
(vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
- null_or_bond = vlan_dev_real_dev(skb->dev);
+ orig_or_bond = vlan_dev_real_dev(skb->dev);
}
type = skb->protocol;
@@ -2879,7 +2888,7 @@ ncls:
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
if (ptype->type == type && (ptype->dev == null_or_orig ||
ptype->dev == skb->dev || ptype->dev == orig_dev ||
- ptype->dev == null_or_bond)) {
+ ptype->dev == orig_or_bond)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe..785e5276a30 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
static void est_timer(unsigned long arg)
{
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
*
* Returns 0 on success or a negative error code.
*
- * NOTE: Called under rtnl_mutex
*/
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
+ spin_lock(&est_tree_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
list_add_rcu(&est->list, &elist[idx].list);
gen_add_node(est);
+ spin_unlock(&est_tree_lock);
return 0;
}
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
*
* Removes the rate estimator specified by &bstats and &rate_est.
*
- * NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est)
{
struct gen_estimator *e;
+ spin_lock(&est_tree_lock);
while ((e = gen_find_node(bstats, rate_est))) {
rb_erase(&e->node, &est_root);
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
+ spin_unlock(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est *rate_est)
{
+ bool res;
+
ASSERT_RTNL();
- return gen_find_node(bstats, rate_est) != NULL;
+ spin_lock(&est_tree_lock);
+ res = gen_find_node(bstats, rate_est) != NULL;
+ spin_unlock(&est_tree_lock);
+
+ return res;
}
EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418d..1dacd7ba8db 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
end_time = ktime_now();
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
- pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+ pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
}
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
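The pktgen change bases next_tx on spin_until (the intended deadline) rather than end_time (the possibly-late wakeup), so scheduling latency no longer accumulates across packets. A small simulation of the two policies (made-up numbers, not pktgen code):

#include <stdio.h>

int main(void)
{
        const long delay = 100;   /* desired inter-packet gap */
        const long jitter = 7;    /* pretend every wakeup is 7 units late */
        long fixed = 0, buggy = 0;

        for (int i = 1; i <= 5; i++) {
                /* Fixed (ktime_add_ns(spin_until, delay)): the next deadline
                 * derives from the previous deadline, so error never grows. */
                fixed += delay;

                /* Buggy (ktime_add_ns(end_time, delay)): the next deadline
                 * derives from the late wakeup, so the 7-unit latency is
                 * added on every iteration. */
                buggy = (buggy + jitter) + delay;

                printf("tx %d: fixed=%ld buggy=%ld drift=%ld\n",
                       i, fixed, buggy, buggy - fixed);
        }
        return 0;
}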
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e398..34432b4e96b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
* reference count dropping and cleans up the skbuff as if it
* just came from __alloc_skb().
*/
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
struct skb_shared_info *shinfo;
if (irqs_disabled())
- return 0;
+ return false;
if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
- return 0;
+ return false;
skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
if (skb_end_pointer(skb) - skb->head < skb_size)
- return 0;
+ return false;
if (skb_shared(skb) || skb_cloned(skb))
- return 0;
+ return false;
skb_release_head_state(skb);
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
skb->data = skb->head + NET_SKB_PAD;
skb_reset_tail_pointer(skb);
- return 1;
+ return true;
}
EXPORT_SYMBOL(skb_recycle_check);
@@ -532,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->ip_summed = old->ip_summed;
skb_copy_queue_mapping(new, old);
new->priority = old->priority;
+ new->deliver_no_wcard = old->deliver_no_wcard;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
new->ipvs_property = old->ipvs_property;
#endif
@@ -569,7 +570,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(len);
C(data_len);
C(mac_len);
- C(rxhash);
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
}
EXPORT_SYMBOL_GPL(skb_cow_data);
+static void sock_rmem_free(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
+ return -ENOMEM;
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_rmem_free;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+ skb_queue_tail(&sk->sk_error_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb->len);
+ return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps)
{
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
err = sock_queue_err_skb(sk, skb);
+
if (err)
kfree_skb(skb);
}
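The new sock_queue_err_skb() charges skb->truesize against sk_rmem_alloc and registers sock_rmem_free() as the destructor to uncharge on free, bounding the error queue by sk_rcvbuf. A userspace sketch of that charge/uncharge accounting with a C11 atomic (illustrative names and limit):

#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

#define RCVBUF_LIMIT 1000

static atomic_int rmem_alloc;            /* bytes charged to the queue */

/* Destructor analogue of sock_rmem_free(): uncharge on free. */
static void buf_free(int truesize)
{
        atomic_fetch_sub(&rmem_alloc, truesize);
}

/* Analogue of sock_queue_err_skb(): refuse the buffer if charging it
 * would exceed the receive-buffer limit, otherwise charge it. */
static int queue_err_buf(int truesize)
{
        if (atomic_load(&rmem_alloc) + truesize >= RCVBUF_LIMIT)
                return -ENOMEM;
        atomic_fetch_add(&rmem_alloc, truesize);
        return 0;
}

int main(void)
{
        printf("queue 600: %d\n", queue_err_buf(600));  /* 0: accepted   */
        printf("queue 600: %d\n", queue_err_buf(600));  /* -ENOMEM: full */
        buf_free(600);                                  /* consumer frees */
        printf("queue 600: %d\n", queue_err_buf(600));  /* 0: accepted   */
        return 0;
}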
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e3a1fd938a..7c3a7d19124 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -303,7 +303,7 @@ config ARPD
If unsure, say N.
config SYN_COOKIES
- bool "IP: TCP syncookie support (disabled per default)"
+ bool "IP: TCP syncookie support"
---help---
Normal TCP/IP networking is open to an attack known as "SYN
flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES
server is really overloaded. If this happens frequently better turn
them off.
- If you say Y here, note that SYN cookies aren't enabled by default;
- you can enable them by saying Y to "/proc file system support" and
+ If you say Y here, you can disable SYN cookies at run time by
+ saying Y to "/proc file system support" and
"Sysctl support" below and executing the command
- echo 1 >/proc/sys/net/ipv4/tcp_syncookies
+ echo 0 > /proc/sys/net/ipv4/tcp_syncookies
- at boot time after the /proc file system has been mounted.
+ after the /proc file system has been mounted.
If unsure, say N.
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9a4a6c96cb0..041d41df122 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -873,8 +873,10 @@ int ip_append_data(struct sock *sk,
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
+ skb = skb_peek_tail(&sk->sk_write_queue);
+
inet->cork.length += length;
- if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+ if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->u.dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
@@ -892,7 +894,7 @@ int ip_append_data(struct sock *sk,
* adding appropriate IP header.
*/
- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+ if (!skb)
goto alloc_new_skb;
while (length > 0) {
@@ -1121,7 +1123,8 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
return -EINVAL;
inet->cork.length += size;
- if ((sk->sk_protocol == IPPROTO_UDP) &&
+ if ((size + skb->len > mtu) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
(rt->u.dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 856123fe32f..757f25eb9b4 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
{
struct mr_table *mrt, *next;
- list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+ list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+ list_del(&mrt->list);
kfree(mrt);
+ }
fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
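The ipmr fix (and the matching ip6mr one below) unlinks each mr_table with list_del() before kfree(); freeing a node that is still chained leaves the list pointing into freed memory. A minimal sketch of the unlink-then-free teardown (own toy list, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

/* The _safe iteration pattern: grab the successor before the current
 * node is unlinked and freed, so teardown never dereferences freed
 * memory (the analogue of list_del() before kfree()). */
static void destroy_all(struct node **head)
{
        struct node *n, *next;

        for (n = *head; n != NULL; n = next) {
                next = n->next;          /* save successor first */
                n->next = NULL;          /* unlink (list_del analogue) */
                free(n);                 /* then free */
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = head;
                head = n;
        }
        destroy_all(&head);
        printf("list destroyed, head=%p\n", (void *)head);
        return 0;
}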
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a..4b6c5ca610f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
cpu = smp_processor_id();
table_base = private->entries[cpu];
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
- stackptr = &private->stackptr[cpu];
+ stackptr = per_cpu_ptr(private->stackptr, cpu);
origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5c24db4a3c9..9f6b22206c5 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
{ .sport = th->dest,
.dport = th->source } } };
security_req_classify_flow(req, &fl);
- if (ip_route_output_key(&init_net, &rt, &fl)) {
+ if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
reqsk_free(req);
goto out;
}
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index c209e054a63..377bc934937 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* calculate 2^fract in a <<7 value.
*/
is_slowstart = 1;
- increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
- - 128;
+ increment = ((1 << min(ca->rho, 16U)) *
+ hybla_fraction(rho_fractions)) - 128;
} else {
/*
* congestion avoidance
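The hybla change clamps ca->rho to 16 before shifting: in C, shifting a 32-bit value by 32 or more is undefined behaviour, so an unbounded rho could yield a nonsense congestion-window increment. A sketch of the clamped computation (the fraction argument stands in for hybla_fraction()'s <<7 fixed-point value):

#include <stdio.h>

/* Bound the shift count before computing the window increment; with an
 * unbounded rho, "1u << rho" is undefined behaviour once rho >= 32. */
static unsigned int hybla_increment(unsigned int rho, unsigned int fraction)
{
        unsigned int shift = rho < 16 ? rho : 16;   /* min(rho, 16U) */

        return (1u << shift) * fraction - 128;
}

int main(void)
{
        printf("rho=4:  %u\n", hybla_increment(4, 128));
        printf("rho=40: %u\n", hybla_increment(40, 128)); /* shift clamped to 16 */
        return 0;
}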
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb107..548d575e6cc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
if (sk->sk_family == AF_INET) {
printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
msg,
- &inet->daddr, ntohs(inet->dport),
+ &inet->inet_daddr, ntohs(inet->inet_dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
struct ipv6_pinfo *np = inet6_sk(sk);
printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
msg,
- &np->daddr, ntohs(inet->dport),
+ &np->daddr, ntohs(inet->inet_dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 202cf09c4cd..fe193e53af4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ sock_rps_save_rxhash(sk, skb->rxhash);
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
}
return 0;
}
- }
+ } else
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
TCP_CHECK_TIMER(sk);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
@@ -1672,8 +1675,6 @@ process:
skb->dev = NULL;
- sock_rps_save_rxhash(sk, skb->rxhash);
-
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 58585748bda..eec4ff456e3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
if (!inet->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
- } else {
+ } else
ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
- }
+
sk->sk_err = err;
sk->sk_error_report(sk);
out:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce799298255..03e62f94ff8 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
np->tclass, NULL, &fl, (struct rt6_info*)dst,
MSG_DONTWAIT, np->dontfrag);
if (err) {
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
goto out_put;
}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
np->dontfrag);
if (err) {
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
goto out_put;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 073071f2b75..66078dad7fe 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
-#define ip6mr_for_each_table(mrt, met) \
+#define ip6mr_for_each_table(mrt, net) \
list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
{
struct mr6_table *mrt, *next;
- list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+ list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+ list_del(&mrt->list);
ip6mr_free_table(mrt);
+ }
fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c..ab1622d7d40 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
IPV6_TLV_PADN, 0 };
/* we assume size > sizeof(ra) here */
- skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+ size += LL_ALLOCATED_SPACE(dev);
+ /* limit our allocations to order-0 page */
+ size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+ skb = sock_alloc_send_skb(sk, size, 1, &err);
if (!skb)
return NULL;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0abdc242ddb..2efef52fb46 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -586,6 +586,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
src_addr = solicited_addr;
if (ifp->flags & IFA_F_OPTIMISTIC)
override = 0;
+ inc_opt |= ifp->idev->cnf.force_tllao;
in6_ifa_put(ifp);
} else {
if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -599,7 +600,6 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
icmp6h.icmp6_solicited = solicited;
icmp6h.icmp6_override = override;
- inc_opt |= ifp->idev->cnf.force_tllao;
__ndisc_send(dev, neigh, daddr, src_addr,
&icmp6h, solicited_addr,
inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd8369..9d2d68f0e60 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
cpu = smp_processor_id();
table_base = private->entries[cpu];
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
- stackptr = &private->stackptr[cpu];
+ stackptr = per_cpu_ptr(private->stackptr, cpu);
origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b072..252d76199c4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
{
int flags = 0;
- if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+ if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
flags |= RT6_LOOKUP_F_IFACE;
if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c163d0a149f..98258b7341e 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
spin_unlock(&local->ampdu_lock);
- spin_unlock_bh(&sta->lock);
- /* send an addBA request */
+ /* prepare tid data */
sta->ampdu_mlme.dialog_token_allocator++;
sta->ampdu_mlme.tid_tx[tid]->dialog_token =
sta->ampdu_mlme.dialog_token_allocator;
sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
+ spin_unlock_bh(&sta->lock);
+
+ /* send AddBA request */
ieee80211_send_addba_request(sdata, pubsta->addr, tid,
sta->ampdu_mlme.tid_tx[tid]->dialog_token,
sta->ampdu_mlme.tid_tx[tid]->ssn,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4..32be11e4c4d 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
#include <linux/nl80211.h>
#include "ieee80211_i.h"
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
__ieee80211_get_channel_mode(struct ieee80211_local *local,
struct ieee80211_sub_if_data *ignore)
{
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4f227131665..9c1da080916 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
struct survey_info *survey)
{
int ret = -EOPNOTSUPP;
- if (local->ops->conf_tx)
+ if (local->ops->get_survey)
ret = local->ops->get_survey(&local->hw, idx, survey);
/* trace_drv_get_survey(local, idx, survey, ret); */
return ret;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0839c4e8fd2..f803f8b72a9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_ACTION:
- if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+ switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_BACK: {
+ struct ieee80211_local *local = sdata->local;
+ int len = skb->len;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (!sta) {
+ rcu_read_unlock();
+ break;
+ }
+
+ local_bh_disable();
+
+ switch (mgmt->u.action.u.addba_req.action_code) {
+ case WLAN_ACTION_ADDBA_REQ:
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.addba_req)))
+ break;
+ ieee80211_process_addba_request(local, sta, mgmt, len);
+ break;
+ case WLAN_ACTION_ADDBA_RESP:
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.addba_resp)))
+ break;
+ ieee80211_process_addba_resp(local, sta, mgmt, len);
+ break;
+ case WLAN_ACTION_DELBA:
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.delba)))
+ break;
+ ieee80211_process_delba(sdata, sta, mgmt, len);
+ break;
+ }
+ local_bh_enable();
+ rcu_read_unlock();
break;
-
- ieee80211_sta_process_chanswitch(sdata,
- &mgmt->u.action.u.chan_switch.sw_elem,
- (void *)ifmgd->associated->priv,
- rx_status->mactime);
- break;
+ }
+ case WLAN_CATEGORY_SPECTRUM_MGMT:
+ ieee80211_sta_process_chanswitch(sdata,
+ &mgmt->u.action.u.chan_switch.sw_elem,
+ (void *)ifmgd->associated->priv,
+ rx_status->mactime);
+ break;
+ }
}
mutex_unlock(&ifmgd->mtx);
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&ifmgd->mtx);
if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
- (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
- cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+ (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_work *wk;
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+
+ if (wk->type != IEEE80211_WORK_ASSOC)
+ continue;
+
+ if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+ continue;
+ if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+ continue;
+ /*
+ * Printing the message only here means we can't
+ * spuriously print it, but it also means that it
+ * won't be printed when the frame comes in before
+ * we even tried to associate or in similar cases.
+ *
+ * Ultimately, I suspect cfg80211 should print the
+ * messages instead.
+ */
+ printk(KERN_DEBUG
+ "%s: deauthenticated from %pM (Reason: %u)\n",
+ sdata->name, mgmt->bssid,
+ le16_to_cpu(mgmt->u.deauth.reason_code));
+
+ list_del_rcu(&wk->list);
+ free_work(wk);
+ break;
+ }
+ mutex_unlock(&local->work_mtx);
+
+ cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+ }
out:
kfree_skb(skb);
}
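Each new case in the mlme action dispatch validates the frame length against IEEE80211_MIN_ACTION_SIZE plus the variant-specific body before touching union fields. A generic sketch of that check-length-before-access pattern (illustrative structs, not the mac80211 ones):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct frame_hdr { uint8_t type; };
struct addba_body { uint16_t token; uint16_t ssn; };

/* Verify the frame actually contains the variant-specific fields
 * before reading them; otherwise drop it unparsed. */
static int process_frame(const uint8_t *buf, size_t len)
{
        struct addba_body body;

        if (len < sizeof(struct frame_hdr) + sizeof(body))
                return -1;              /* too short: drop, don't parse */

        memcpy(&body, buf + sizeof(struct frame_hdr), sizeof(body));
        printf("token=%u ssn=%u\n", (unsigned)body.token, (unsigned)body.ssn);
        return 0;
}

int main(void)
{
        uint8_t short_frame[2] = { 0 };
        uint8_t good_frame[5] = { 1, 0x2a, 0x00, 0x10, 0x00 };

        printf("short: %d\n", process_frame(short_frame, sizeof(short_frame)));
        printf("good:  %d\n", process_frame(good_frame, sizeof(good_frame)));
        return 0;
}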
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6e2a7bcd8cb..be9abc2e634 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
return RX_CONTINUE;
if (ieee80211_is_back_req(bar->frame_control)) {
+ struct {
+ __le16 control, start_seq_num;
+ } __packed bar_data;
+
if (!rx->sta)
return RX_DROP_MONITOR;
+
+ if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
+ &bar_data, sizeof(bar_data)))
+ return RX_DROP_MONITOR;
+
spin_lock(&rx->sta->lock);
- tid = le16_to_cpu(bar->control) >> 12;
+ tid = le16_to_cpu(bar_data.control) >> 12;
if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
spin_unlock(&rx->sta->lock);
return RX_DROP_MONITOR;
}
tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
- start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
+ start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
/* reset session timer */
if (tid_agg_rx->timeout)
@@ -1935,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
break;
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+
switch (mgmt->u.action.u.addba_req.action_code) {
case WLAN_ACTION_ADDBA_REQ:
if (len < (IEEE80211_MIN_ACTION_SIZE +
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index be3d4a69869..b025dc7bb0f 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -715,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
struct ieee80211_work *wk;
- enum work_action rma;
+ enum work_action rma = WORK_ACT_NONE;
u16 fc;
rx_status = (struct ieee80211_rx_status *) skb->cb;
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index d8f7e8ef67b..ff04e9edbed 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -162,6 +162,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
@@ -174,6 +175,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
ret = 0;
}
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
@@ -193,6 +195,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
list_del(&cp->c_list);
@@ -202,6 +205,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
} else
ret = 0;
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b..e34622fa000 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
vfree(info->jumpstack);
else
kfree(info->jumpstack);
- if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
- vfree(info->stackptr);
- else
- kfree(info->stackptr);
+
+ free_percpu(info->stackptr);
kfree(info);
}
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
unsigned int size;
int cpu;
- size = sizeof(unsigned int) * nr_cpu_ids;
- if (size > PAGE_SIZE)
- i->stackptr = vmalloc(size);
- else
- i->stackptr = kmalloc(size, GFP_KERNEL);
+ i->stackptr = alloc_percpu(unsigned int);
if (i->stackptr == NULL)
return -ENOMEM;
- memset(i->stackptr, 0, size);
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
struct xt_table_info *private;
struct xt_table *t, *table;
- ret = xt_jumpstack_alloc(newinfo);
- if (ret < 0)
- return ERR_PTR(ret);
-
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca5..94d72e85a47 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
lock_sock(sk);
if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
skparent = pn->listener;
- sk_del_node_init(sk);
release_sock(sk);
- sk = skparent;
pn = pep_sk(skparent);
- lock_sock(sk);
+ lock_sock(skparent);
+ sk_del_node_init(sk);
+ sk = skparent;
}
/* Unhash a listening sock only when it is closed
* and all of its active connected pipes are closed. */
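The pep_sock_unhash reordering takes the parent listener's lock before sk_del_node_init(sk): the hash chain the child sits on belongs to the listener, so unhashing must happen under the parent's lock, not the child's. A pthread sketch of the corrected ordering (simplified; the real code also swaps sk to the parent afterwards):

#include <pthread.h>
#include <stdio.h>

struct sock_s {
        pthread_mutex_t lock;
        int hashed;
};

/* Drop the child's lock, take the parent's, and only then unhash the
 * child from the parent-owned chain (sk_del_node_init() analogue). */
static void unhash_child(struct sock_s *child, struct sock_s *parent)
{
        pthread_mutex_lock(&child->lock);
        /* ... inspect child state ... */
        pthread_mutex_unlock(&child->lock);     /* drop child lock first */

        pthread_mutex_lock(&parent->lock);      /* then take parent lock */
        child->hashed = 0;                      /* unhash under parent lock */
        pthread_mutex_unlock(&parent->lock);
}

int main(void)
{
        struct sock_s parent = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct sock_s child  = { PTHREAD_MUTEX_INITIALIZER, 1 };

        unhash_child(&child, &parent);
        printf("child hashed: %d\n", child.hashed);
        return 0;
}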
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f75..f68832798db 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
err = rds_ib_setup_qp(conn);
if (err) {
rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+ mutex_unlock(&conn->c_cm_lock);
goto out;
}
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fba..b5dd6ac39be 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
err = rds_iw_setup_qp(conn);
if (err) {
rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+ mutex_unlock(&conn->c_cm_lock);
goto out;
}
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d885ba31156..570949417f3 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
iph->daddr = new_addr;
csum_replace4(&iph->check, addr, new_addr);
+ } else if ((iph->frag_off & htons(IP_OFFSET)) ||
+ iph->protocol != IPPROTO_ICMP) {
+ goto out;
}
ihl = iph->ihl * 4;
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
break;
}
+out:
return action;
drop:
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fdbd0b7bd84..50e3d945e1f 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_pedit *p = a->priv;
int i, munged = 0;
- u8 *pptr;
+ unsigned int off;
if (!(skb->tc_verd & TC_OK2MUNGE)) {
/* should we set skb->cloned? */
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
}
}
- pptr = skb_network_header(skb);
+ off = skb_network_offset(skb);
spin_lock(&p->tcf_lock);
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
struct tc_pedit_key *tkey = p->tcfp_keys;
for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
- u32 *ptr;
+ u32 *ptr, _data;
int offset = tkey->off;
if (tkey->offmask) {
- if (skb->len > tkey->at) {
- char *j = pptr + tkey->at;
- offset += ((*j & tkey->offmask) >>
- tkey->shift);
- } else {
+ char *d, _d;
+
+ d = skb_header_pointer(skb, off + tkey->at, 1,
+ &_d);
+ if (!d)
goto bad;
- }
+ offset += (*d & tkey->offmask) >> tkey->shift;
}
if (offset % 4) {
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
goto bad;
}
- ptr = (u32 *)(pptr+offset);
+ ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+ if (!ptr)
+ goto bad;
/* just do it, baby */
*ptr = ((*ptr & tkey->mask) ^ tkey->val);
+ if (ptr == &_data)
+ skb_store_bits(skb, off + offset, ptr, 4);
munged++;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 96275422c61..4f522143811 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
{
struct {
struct tc_u_knode *knode;
- u8 *ptr;
+ unsigned int off;
} stack[TC_U32_MAXDEPTH];
struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
- u8 *ptr = skb_network_header(skb);
+ unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
int off2 = 0;
@@ -134,8 +134,14 @@ next_knode:
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
-
- if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+ unsigned int toff;
+ __be32 *data, _data;
+
+ toff = off + key->off + (off2 & key->offmask);
+ data = skb_header_pointer(skb, toff, 4, &_data);
+ if (!data)
+ goto out;
+ if ((*data ^ key->val) & key->mask) {
n = n->next;
goto next_knode;
}
@@ -174,29 +180,45 @@ check_terminal:
if (sdepth >= TC_U32_MAXDEPTH)
goto deadloop;
stack[sdepth].knode = n;
- stack[sdepth].ptr = ptr;
+ stack[sdepth].off = off;
sdepth++;
ht = n->ht_down;
sel = 0;
- if (ht->divisor)
- sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
-
+ if (ht->divisor) {
+ __be32 *data, _data;
+
+ data = skb_header_pointer(skb, off + n->sel.hoff, 4,
+ &_data);
+ if (!data)
+ goto out;
+ sel = ht->divisor & u32_hash_fold(*data, &n->sel,
+ n->fshift);
+ }
if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
goto next_ht;
if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
- if (n->sel.flags&TC_U32_VAROFFSET)
- off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+ if (n->sel.flags & TC_U32_VAROFFSET) {
+ __be16 *data, _data;
+
+ data = skb_header_pointer(skb,
+ off + n->sel.offoff,
+ 2, &_data);
+ if (!data)
+ goto out;
+ off2 += ntohs(n->sel.offmask & *data) >>
+ n->sel.offshift;
+ }
off2 &= ~3;
}
if (n->sel.flags&TC_U32_EAT) {
- ptr += off2;
+ off += off2;
off2 = 0;
}
- if (ptr < skb_tail_pointer(skb))
+ if (off < skb->len)
goto next_ht;
}
@@ -204,9 +226,10 @@ check_terminal:
if (sdepth--) {
n = stack[sdepth].knode;
ht = n->ht_up;
- ptr = stack[sdepth].ptr;
+ off = stack[sdepth].off;
goto check_terminal;
}
+out:
return -1;
deadloop:
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3415b6ce1c0..807643bdcba 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -449,6 +449,7 @@ static __init void teql_master_setup(struct net_device *dev)
dev->tx_queue_len = 100;
dev->flags = IFF_NOARP;
dev->hard_header_len = LL_MAX_HEADER;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static LIST_HEAD(master_dev_list);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2a9675136c6..7ca65c7005e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -210,7 +210,8 @@ struct sock_xprt {
* State of TCP reply receive
*/
__be32 tcp_fraghdr,
- tcp_xid;
+ tcp_xid,
+ tcp_calldir;
u32 tcp_offset,
tcp_reclen;
@@ -927,7 +928,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
{
size_t len, used;
u32 offset;
- __be32 calldir;
+ char *p;
/*
* We want transport->tcp_offset to be 8 at the end of this routine
@@ -936,26 +937,33 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
* transport->tcp_offset is 4 (after having already read the xid).
*/
offset = transport->tcp_offset - sizeof(transport->tcp_xid);
- len = sizeof(calldir) - offset;
+ len = sizeof(transport->tcp_calldir) - offset;
dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
- used = xdr_skb_read_bits(desc, &calldir, len);
+ p = ((char *) &transport->tcp_calldir) + offset;
+ used = xdr_skb_read_bits(desc, p, len);
transport->tcp_offset += used;
if (used != len)
return;
transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
- transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
- transport->tcp_flags |= TCP_RCV_COPY_DATA;
/*
* We don't yet have the XDR buffer, so we will write the calldir
* out after we get the buffer from the 'struct rpc_rqst'
*/
- if (ntohl(calldir) == RPC_REPLY)
+ switch (ntohl(transport->tcp_calldir)) {
+ case RPC_REPLY:
+ transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_DATA;
transport->tcp_flags |= TCP_RPC_REPLY;
- else
+ break;
+ case RPC_CALL:
+ transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_DATA;
transport->tcp_flags &= ~TCP_RPC_REPLY;
- dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
- (transport->tcp_flags & TCP_RPC_REPLY) ?
- "reply for" : "request with", calldir);
+ break;
+ default:
+ dprintk("RPC: invalid request message type\n");
+ xprt_force_disconnect(&transport->xprt);
+ }
xs_tcp_check_fraghdr(transport);
}
@@ -975,12 +983,10 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
/*
* Save the RPC direction in the XDR buffer
*/
- __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
- htonl(RPC_REPLY) : 0;
-
memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
- &calldir, sizeof(calldir));
- transport->tcp_copied += sizeof(calldir);
+ &transport->tcp_calldir,
+ sizeof(transport->tcp_calldir));
+ transport->tcp_copied += sizeof(transport->tcp_calldir);
transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 6a329158bdf..a3cca0a9434 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -95,13 +95,13 @@ resume:
goto error_nolock;
}
- dst = dst_pop(dst);
+ dst = skb_dst_pop(skb);
if (!dst) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
err = -EHOSTUNREACH;
goto error_nolock;
}
- skb_dst_set(skb, dst);
+ skb_dst_set_noref(skb, dst);
x = dst->xfrm;
} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d965a2bad8d..af1c173be4a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
return 0;
}
+ skb_dst_force(skb);
dst = skb_dst(skb);
res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
@@ -2299,7 +2300,8 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
return 0;
if (xdst->xfrm_genid != dst->xfrm->genid)
return 0;
- if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
+ if (xdst->num_pols > 0 &&
+ xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
if (strict && fl &&