author     Mark Brown <broonie@opensource.wolfsonmicro.com>   2012-05-12 13:06:08 +0100
committer  Mark Brown <broonie@opensource.wolfsonmicro.com>   2012-05-12 13:06:08 +0100
commit     25061d285747f20aafa4b50df1b0b5665fef29cd
tree       c302ac93e3476788a4671ee556a902fce2592f3a /net/core
parent     7a6476143270d947924f5bbbc124accb0e558bf4
parent     6560ffd1ccd688152393dc7c35dbdcc33140633b
Merge tag 'regmap-3.4' into regmap-stride
regmap: Last minute bug fix for 3.4
This is a last-minute bug fix that was only just noticed, since the code
path being exercised here is one that is fairly rarely used. The
changelog for the change itself is extremely clear, and the code itself
is obvious on inspection, so this should be pretty safe.
Conflicts:
drivers/base/regmap/regmap.c (overlap between the fix and stride code)
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c            20
-rw-r--r--   net/core/drop_monitor.c   89
-rw-r--r--   net/core/net_namespace.c  33
-rw-r--r--   net/core/skbuff.c          4
4 files changed, 106 insertions(+), 40 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index c25d453b280..9bb8f87c4cd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
  *	register_netdevice_notifier(). The notifier is unlinked into the
  *	kernel structures and may then be reused. A negative errno code
  *	is returned on a failure.
+ *
+ *	After unregistering unregister and down device events are synthesized
+ *	for all devices on the device list to the removed notifier to remove
+ *	the need for special case cleanup code.
  */
 
 int unregister_netdevice_notifier(struct notifier_block *nb)
 {
+    struct net_device *dev;
+    struct net *net;
     int err;
 
     rtnl_lock();
     err = raw_notifier_chain_unregister(&netdev_chain, nb);
+    if (err)
+        goto unlock;
+
+    for_each_net(net) {
+        for_each_netdev(net, dev) {
+            if (dev->flags & IFF_UP) {
+                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+                nb->notifier_call(nb, NETDEV_DOWN, dev);
+            }
+            nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+            nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
+        }
+    }
+unlock:
     rtnl_unlock();
     return err;
 }
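The dev.c hunk above makes unregister_netdevice_notifier() replay NETDEV_GOING_DOWN/NETDEV_DOWN for devices that are still up, plus NETDEV_UNREGISTER, to the notifier being removed, so a subscriber's callback can serve as its single cleanup path. As a rough illustration, a minimal hypothetical module relying on that behaviour could look like the sketch below; the module name, callback, and printouts are illustrative and not from this patch.

#include <linux/module.h>
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
    struct net_device *dev = ptr;   /* on 3.4, ptr is the net_device itself */

    switch (event) {
    case NETDEV_UP:
        pr_info("example: %s is up\n", dev->name);
        break;
    case NETDEV_DOWN:
    case NETDEV_UNREGISTER:
        /* per-device state would be released here */
        pr_info("example: dropping state for %s\n", dev->name);
        break;
    }
    return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
    .notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
    return register_netdevice_notifier(&example_nb);
}

static void __exit example_exit(void)
{
    /* With the change above, DOWN/UNREGISTER events for devices that
     * still exist are synthesized into example_netdev_event() here. */
    unregister_netdevice_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");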
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060..a7cad741df0 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
     struct work_struct dm_alert_work;
-    struct sk_buff *skb;
+    struct sk_buff __rcu *skb;
     atomic_t dm_hit_count;
     struct timer_list send_timer;
+    int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
     size_t al;
     struct net_dm_alert_msg *msg;
     struct nlattr *nla;
+    struct sk_buff *skb;
+    struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
 
     al = sizeof(struct net_dm_alert_msg);
     al += dm_hit_limit * sizeof(struct net_dm_drop_point);
     al += sizeof(struct nlattr);
 
-    data->skb = genlmsg_new(al, GFP_KERNEL);
-    genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-            0, NET_DM_CMD_ALERT);
-    nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-    msg = nla_data(nla);
-    memset(msg, 0, al);
-    atomic_set(&data->dm_hit_count, dm_hit_limit);
+    skb = genlmsg_new(al, GFP_KERNEL);
+
+    if (skb) {
+        genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+                0, NET_DM_CMD_ALERT);
+        nla = nla_reserve(skb, NLA_UNSPEC,
+                  sizeof(struct net_dm_alert_msg));
+        msg = nla_data(nla);
+        memset(msg, 0, al);
+    } else
+        schedule_work_on(data->cpu, &data->dm_alert_work);
+
+    /*
+     * Don't need to lock this, since we are guaranteed to only
+     * run this on a single cpu at a time.
+     * Note also that we only update data->skb if the old and new skb
+     * pointers don't match.  This ensures that we don't continually call
+     * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
+     */
+    if (skb != oskb) {
+        rcu_assign_pointer(data->skb, skb);
+
+        synchronize_rcu();
+
+        atomic_set(&data->dm_hit_count, dm_hit_limit);
+    }
+
 }
 
 static void send_dm_alert(struct work_struct *unused)
 {
     struct sk_buff *skb;
-    struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+    struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+    WARN_ON_ONCE(data->cpu != smp_processor_id());
 
     /*
      * Grab the skb we're about to send
      */
-    skb = data->skb;
+    skb = rcu_dereference_protected(data->skb, 1);
 
     /*
      * Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
     /*
      * Ship it!
      */
-    genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+    if (skb)
+        genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 
+    put_cpu_var(dm_cpu_data);
 }
 
 /*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
  */
 static void sched_send_work(unsigned long unused)
 {
-    struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+    struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+    schedule_work_on(smp_processor_id(), &data->dm_alert_work);
 
-    schedule_work(&data->dm_alert_work);
+    put_cpu_var(dm_cpu_data);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
     struct nlmsghdr *nlh;
     struct nlattr *nla;
     int i;
-    struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+    struct sk_buff *dskb;
+    struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
 
+    rcu_read_lock();
+    dskb = rcu_dereference(data->skb);
+
+    if (!dskb)
+        goto out;
 
     if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
         /*
@@ -144,12 +180,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
         goto out;
     }
 
-    nlh = (struct nlmsghdr *)data->skb->data;
+    nlh = (struct nlmsghdr *)dskb->data;
     nla = genlmsg_data(nlmsg_data(nlh));
     msg = nla_data(nla);
     for (i = 0; i < msg->entries; i++) {
         if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
             msg->points[i].count++;
+            atomic_inc(&data->dm_hit_count);
             goto out;
         }
     }
@@ -157,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
     /*
      * We need to create a new entry
      */
-    __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+    __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
     nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
     memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
     msg->points[msg->entries].count = 1;
@@ -169,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
     }
 
 out:
+    rcu_read_unlock();
+    put_cpu_var(dm_cpu_data);
     return;
 }
 
@@ -213,7 +252,7 @@ static int set_all_monitor_traces(int state)
     struct dm_hw_stat_delta *new_stat = NULL;
     struct dm_hw_stat_delta *temp;
 
-    spin_lock(&trace_state_lock);
+    mutex_lock(&trace_state_mutex);
 
     if (state == trace_state) {
         rc = -EAGAIN;
@@ -252,7 +291,7 @@ static int set_all_monitor_traces(int state)
         rc = -EINPROGRESS;
 
 out_unlock:
-    spin_unlock(&trace_state_lock);
+    mutex_unlock(&trace_state_mutex);
 
     return rc;
 }
@@ -295,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
         new_stat->dev = dev;
         new_stat->last_rx = jiffies;
-        spin_lock(&trace_state_lock);
+        mutex_lock(&trace_state_mutex);
         list_add_rcu(&new_stat->list, &hw_stats_list);
-        spin_unlock(&trace_state_lock);
+        mutex_unlock(&trace_state_mutex);
         break;
     case NETDEV_UNREGISTER:
-        spin_lock(&trace_state_lock);
+        mutex_lock(&trace_state_mutex);
         list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
             if (new_stat->dev == dev) {
                 new_stat->dev = NULL;
@@ -311,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
                 }
             }
         }
-        spin_unlock(&trace_state_lock);
+        mutex_unlock(&trace_state_mutex);
         break;
     }
 out:
@@ -367,13 +406,15 @@ static int __init init_net_drop_monitor(void)
 
     for_each_present_cpu(cpu) {
         data = &per_cpu(dm_cpu_data, cpu);
-        reset_per_cpu_data(data);
+        data->cpu = cpu;
         INIT_WORK(&data->dm_alert_work, send_dm_alert);
         init_timer(&data->send_timer);
         data->send_timer.data = cpu;
         data->send_timer.function = sched_send_work;
+        reset_per_cpu_data(data);
     }
 
+
     goto out;
 
 out_unreg:
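The drop_monitor changes above stop touching data->skb directly and manage it as an RCU pointer: trace_drop_common() reads it under rcu_read_lock(), while reset_per_cpu_data() publishes a replacement with rcu_assign_pointer() and waits in synchronize_rcu() before re-arming the hit counter. A stripped-down sketch of that publish/retire pattern, with hypothetical names, is:

#include <linux/rcupdate.h>

struct example_buf {
    int hits;
};

static struct example_buf __rcu *cur_buf;

/* Reader side: safe against a concurrent swap of cur_buf. */
static int example_read_hits(void)
{
    struct example_buf *buf;
    int hits = 0;

    rcu_read_lock();
    buf = rcu_dereference(cur_buf);
    if (buf)
        hits = buf->hits;
    rcu_read_unlock();
    return hits;
}

/* Single updater: publish the new buffer, then wait out existing
 * readers before the old one may be reused or freed. */
static struct example_buf *example_swap_buf(struct example_buf *newb)
{
    struct example_buf *oldb = rcu_dereference_protected(cur_buf, 1);

    if (newb != oldb) {
        rcu_assign_pointer(cur_buf, newb);
        synchronize_rcu();
    }
    return oldb;
}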
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fda9a0..31a5ae51a45 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -83,21 +83,29 @@ assign:
 
 static int ops_init(const struct pernet_operations *ops, struct net *net)
 {
-    int err;
+    int err = -ENOMEM;
+    void *data = NULL;
+
     if (ops->id && ops->size) {
-        void *data = kzalloc(ops->size, GFP_KERNEL);
+        data = kzalloc(ops->size, GFP_KERNEL);
         if (!data)
-            return -ENOMEM;
+            goto out;
 
         err = net_assign_generic(net, *ops->id, data);
-        if (err) {
-            kfree(data);
-            return err;
-        }
+        if (err)
+            goto cleanup;
     }
+    err = 0;
     if (ops->init)
-        return ops->init(net);
-    return 0;
+        err = ops->init(net);
+    if (!err)
+        return 0;
+
+cleanup:
+    kfree(data);
+
+out:
+    return err;
 }
 
 static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
 static int __register_pernet_operations(struct list_head *list,
                     struct pernet_operations *ops)
 {
-    int err = 0;
-    err = ops_init(ops, &init_net);
-    if (err)
-        ops_free(ops, &init_net);
-    return err;
-
+    return ops_init(ops, &init_net);
 }
 
 static void __unregister_pernet_operations(struct pernet_operations *ops)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index baf8d281152..e59840010d4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -952,9 +952,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
         goto adjust_others;
     }
 
-    data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+    data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+               gfp_mask);
     if (!data)
         goto nodata;
+    size = SKB_WITH_OVERHEAD(ksize(data));
 
     /* Copy only real data... and, alas, header. This should be
      * optimized for the cases when header is void.
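The pskb_expand_head() hunk reserves a cache-aligned skb_shared_info tail and then recomputes the usable size from what kmalloc() actually returned, so allocator rounding slack is not wasted. A rough sketch of that sizing pattern, in a hypothetical helper (not a kernel API), is:

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical helper illustrating the allocation pattern above. */
static u8 *example_alloc_skb_data(unsigned int size, gfp_t gfp_mask,
                                  unsigned int *usable)
{
    u8 *data;

    /* Reserve room for a cache-line-aligned skb_shared_info at the tail. */
    data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
                   gfp_mask);
    if (!data)
        return NULL;

    /* kmalloc() may round the request up; report the real payload area. */
    *usable = SKB_WITH_OVERHEAD(ksize(data));
    return data;
}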