Diffstat (limited to 'net')
44 files changed, 167 insertions, 104 deletions
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9f04864d15..8ca448db7a0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
 
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                       const unsigned char *addr);
diff --git a/net/core/dev.c b/net/core/dev.c
index 59d058a3b50..e660cb57e42 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev)
 
 EXPORT_SYMBOL(unregister_netdev);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *ocpu)
@@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_NET_DMA
 /**
diff --git a/net/core/dst.c b/net/core/dst.c
index 1a5e49da0e7..836ec660692 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops)
                 if (ops->gc())
                         return NULL;
         }
-        dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+        dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
         if (!dst)
                 return NULL;
         memset(dst, 0, ops->entry_size);
diff --git a/net/core/flow.c b/net/core/flow.c
index b16d31ae5e5..d137f971f97 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
                 if (flow_count(cpu) > flow_hwm)
                         flow_cache_shrink(cpu);
 
-                fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                 if (fle) {
                         fle->next = *head;
                         *head = fle;
@@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
         tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
                           unsigned long action,
                           void *hcpu)
@@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
                 __flow_cache_shrink((unsigned long)hcpu, 0);
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ba509a4a8e9..0ab1987b934 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
                 goto out_entries;
         }
 
-        n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+        n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
         if (!n)
                 goto out_entries;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e1c385e5ba..de7801d589e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,8 @@
 
 #include "kmap_skb.h"
 
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 /*
  *      Keep out-of-line to prevent kernel bloat.
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *      @gfp_mask: allocation mask
  *      @fclone: allocate from fclone cache instead of head cache
  *              and allocate a cloned (child) skb
+ *      @node: numa node to allocate memory on
  *
  *      Allocate a new &sk_buff. The returned buffer has no headroom and a
  *      tail room of size bytes. The object has a reference count of one.
@@ -141,9 +142,9 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *      %GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-                            int fclone)
+                            int fclone, int node)
 {
-        kmem_cache_t *cache;
+        struct kmem_cache *cache;
         struct skb_shared_info *shinfo;
         struct sk_buff *skb;
         u8 *data;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
         /* Get the HEAD */
-        skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
         if (!skb)
                 goto out;
 
         /* Get the DATA. Size must match skb_add_mtu(). */
         size = SKB_DATA_ALIGN(size);
-        data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-                                    gfp_mask);
+        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+                                         gfp_mask, node);
         if (!data)
                 goto nodata;
 
@@ -210,7 +211,7 @@ nodata:
  *      Buffers may only be allocated from interrupts using a @gfp_mask of
  *      %GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
                                      unsigned int size,
                                      gfp_t gfp_mask)
 {
@@ -267,9 +268,10 @@ nodata:
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                 unsigned int length, gfp_t gfp_mask)
 {
+        int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
         struct sk_buff *skb;
 
-        skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
         if (likely(skb)) {
                 skb_reserve(skb, NET_SKB_PAD);
                 skb->dev = dev;
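The skbuff.c hunks above thread a NUMA node through __alloc_skb() so that __netdev_alloc_skb() can place receive buffers on the node backing the device. A hedged sketch of what a driver gains for free (demo_rx_refill() and demo_post_rx_buffer() are hypothetical, not from this patch):

static void demo_post_rx_buffer(struct net_device *dev, struct sk_buff *skb);

static int demo_rx_refill(struct net_device *dev, unsigned int len)
{
        /* __netdev_alloc_skb() now resolves the device's NUMA node itself
         * and passes it down to kmem_cache_alloc_node() and
         * kmalloc_node_track_caller(). */
        struct sk_buff *skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;
        demo_post_rx_buffer(dev, skb);  /* hypothetical ring-post helper */
        return 0;
}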
diff --git a/net/core/sock.c b/net/core/sock.c
index 419c7d3289c..0ed5b4f0bc4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -810,24 +810,11 @@ lenout:
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-        spin_lock_init(&sk->sk_lock.slock);
-        sk->sk_lock.owner = NULL;
-        init_waitqueue_head(&sk->sk_lock.wq);
-        /*
-         * Make sure we are not reinitializing a held lock:
-         */
-        debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-        /*
-         * Mark both the sk_lock and the sk_lock.slock as a
-         * per-address-family lock class:
-         */
-        lockdep_set_class_and_name(&sk->sk_lock.slock,
-                                   af_family_slock_keys + sk->sk_family,
-                                   af_family_slock_key_strings[sk->sk_family]);
-        lockdep_init_map(&sk->sk_lock.dep_map,
-                         af_family_key_strings[sk->sk_family],
-                         af_family_keys + sk->sk_family, 0);
+        sock_lock_init_class_and_name(sk,
+                        af_family_slock_key_strings[sk->sk_family],
+                        af_family_slock_keys + sk->sk_family,
+                        af_family_key_strings[sk->sk_family],
+                        af_family_keys + sk->sk_family);
 }
 
 /**
@@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
                       struct proto *prot, int zero_it)
 {
         struct sock *sk = NULL;
-        kmem_cache_t *slab = prot->slab;
+        struct kmem_cache *slab = prot->slab;
 
         if (slab != NULL)
                 sk = kmem_cache_alloc(slab, priority);
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bdf1bb7a82c..1f4727ddbdb 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -21,8 +21,8 @@
 
 #include <net/sock.h>
 
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
 
 static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
 {
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff05e59043c..d8cf92f09e6 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void)
 #define ccids_read_unlock() do { } while(0)
 #endif
 
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 {
-        kmem_cache_t *slab;
+        struct kmem_cache *slab;
         char slab_name_fmt[32], *slab_name;
         va_list args;
 
@@ -75,7 +75,7 @@ static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
         return slab;
 }
 
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
         if (slab != NULL) {
                 const char *name = kmem_cache_name(slab);
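The sock.c hunk above folds the open-coded lockdep setup into the sock_lock_init_class_and_name() helper, which takes the slock name/key pair followed by the sk_lock name/key pair; the NFS and NFSD reclassification hunks further down call it the same way. A hedged sketch of the calling convention with made-up class keys and family name:

static struct lock_class_key demo_slock_key;
static struct lock_class_key demo_key;

static void demo_sock_lock_init(struct sock *sk)
{
        /* One lockdep class per address family avoids false-positive
         * lock-order reports between unrelated protocols. */
        sock_lock_init_class_and_name(sk,
                                      "slock-AF_DEMO", &demo_slock_key,
                                      "sk_lock-AF_DEMO", &demo_key);
}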
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index c7c29514dce..bcc2d12ae81 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -27,9 +27,9 @@ struct ccid_operations {
         unsigned char   ccid_id;
         const char      *ccid_name;
         struct module   *ccid_owner;
-        kmem_cache_t    *ccid_hc_rx_slab;
+        struct kmem_cache *ccid_hc_rx_slab;
         __u32           ccid_hc_rx_obj_size;
-        kmem_cache_t    *ccid_hc_tx_slab;
+        struct kmem_cache *ccid_hc_tx_slab;
         __u32           ccid_hc_tx_obj_size;
         int             (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
         int             (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cf8c07b2704..66a27b9688c 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
         new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
         if (new_packet == NULL || new_packet->dccphtx_sent) {
                 new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-                                                    SLAB_ATOMIC);
+                                                    GFP_ATOMIC);
 
                 if (unlikely(new_packet == NULL)) {
                         DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
@@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
                 /* new loss event detected */
                 /* calculate last interval length */
                 seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
-                entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+                entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
 
                 if (entry == NULL) {
                         DCCP_BUG("out of memory - can not allocate entry");
@@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
         }
 
         packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
-                                        skb, SLAB_ATOMIC);
+                                        skb, GFP_ATOMIC);
         if (unlikely(packet == NULL)) {
                 DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
                           "to history, consider it lost!\n", dccp_role(sk), sk);
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 48b9b93f8ac..0a0baef16b3 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
         int i;
 
         for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
-                entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+                entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
                 if (entry == NULL) {
                         dccp_li_hist_purge(hist, list);
                         DCCP_BUG("loss interval list entry is NULL");
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 0ae85f0340b..eb257014dd7 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -20,7 +20,7 @@
 #define DCCP_LI_HIST_IVAL_F_LENGTH  8
 
 struct dccp_li_hist {
-        kmem_cache_t *dccplih_slab;
+        struct kmem_cache *dccplih_slab;
 };
 
 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 067cf1c85a3..9a8bcf224aa 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -68,14 +68,14 @@ struct dccp_rx_hist_entry {
 };
 
 struct dccp_tx_hist {
-        kmem_cache_t *dccptxh_slab;
+        struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 struct dccp_rx_hist {
-        kmem_cache_t *dccprxh_slab;
+        struct kmem_cache *dccprxh_slab;
 };
 
 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
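The dccp hunks above are typical of the mechanical half of this series: the kmem_cache_t typedef becomes a plain struct kmem_cache, with no behavior change. A minimal sketch of the resulting slab API, using an illustrative cache name and payload type (note the six-argument kmem_cache_create() of this kernel generation, with constructor and destructor slots):

#include <linux/slab.h>

struct demo_entry {
        unsigned long key;
};

static struct kmem_cache *demo_slab;    /* was: kmem_cache_t * */

static int __init demo_cache_init(void)
{
        demo_slab = kmem_cache_create("demo_slab", sizeof(struct demo_entry),
                                      0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        return demo_slab ? 0 : -ENOBUFS;
}

static struct demo_entry *demo_entry_alloc(void)
{
        /* GFP_ATOMIC replaces the removed SLAB_ATOMIC alias */
        return kmem_cache_alloc(demo_slab, GFP_ATOMIC);
}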
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index bdbc3f43166..13b2421991b 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -590,7 +590,7 @@ create:
 
 replace:
         err = -ENOBUFS;
-        new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+        new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
         if (new_f == NULL)
                 goto out;
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 107bb6cbb0b..648f47c1c39 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -45,8 +45,8 @@
 
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem __read_mostly;
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_hash_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 
 struct fib_node {
         struct hlist_node       fn_hash;
@@ -485,13 +485,13 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
                         goto out;
 
         err = -ENOBUFS;
-        new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+        new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
         if (new_fa == NULL)
                 goto out;
 
         new_f = NULL;
         if (!f) {
-                new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
+                new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL);
                 if (new_f == NULL)
                         goto out_free_new_fa;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d17990ec724..cfb249cc0a5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -172,7 +172,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 static void tnode_free(struct tnode *tn);
 
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
 
@@ -1187,7 +1187,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
                         u8 state;
 
                         err = -ENOBUFS;
-                        new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+                        new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
                         if (new_fa == NULL)
                                 goto out;
@@ -1232,7 +1232,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
                 goto out;
 
         err = -ENOBUFS;
-        new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+        new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
         if (new_fa == NULL)
                 goto out;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 244c4f445c7..8c79c8a4ea5 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -27,11 +27,11 @@
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
  */
-struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
+struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                                                  struct inet_bind_hashbucket *head,
                                                  const unsigned short snum)
 {
-        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
+        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
         if (tb != NULL) {
                 tb->port = snum;
@@ -45,7 +45,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
 /*
  * Caller must hold hashbucket lock for this tb with local BH disabled
  */
-void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
+void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
 {
         if (hlist_empty(&tb->owners)) {
                 __hlist_del(&tb->node);
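Note how the replacement flag tracks the calling context rather than being a blind substitution: the fib insert paths above run in process context via rtnetlink and get GFP_KERNEL, which may sleep, while inet_bind_bucket_create() runs with the bind-hash chain lock held and BH disabled, so it keeps the non-sleeping GFP_ATOMIC. An illustrative contrast (both helper names are made up):

static void *alloc_from_process_context(struct kmem_cache *cachep)
{
        return kmem_cache_alloc(cachep, GFP_KERNEL);    /* may sleep */
}

static void *alloc_under_spinlock(struct kmem_cache *cachep)
{
        return kmem_cache_alloc(cachep, GFP_ATOMIC);    /* must not sleep */
}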
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 061fd7a961b..9f414e35c48 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -91,7 +91,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 {
         struct inet_timewait_sock *tw =
                 kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
-                                 SLAB_ATOMIC);
+                                 GFP_ATOMIC);
 
         if (tw != NULL) {
                 const struct inet_sock *inet = inet_sk(sk);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f072f3875af..711eb6d0285 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -73,7 +73,7 @@
 /* Exported for inet_getid inline function. */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep __read_mostly;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index efcf45ecc81..ecb5422ea23 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
    In this case data path is free of exclusive locks at all.
  */
 
-static kmem_cache_t *mrt_cachep __read_mostly;
+static struct kmem_cache *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 8832eb517d5..8086787a2c5 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -44,7 +44,7 @@ static struct list_head *ip_vs_conn_tab;
 
 /*  SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
+static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
 /*  counter for current IPVS connections */
 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index f4b0e68a16d..8556a4f4f60 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -65,8 +65,8 @@ static LIST_HEAD(helpers);
 unsigned int ip_conntrack_htable_size __read_mostly = 0;
 int ip_conntrack_max __read_mostly;
 struct list_head *ip_conntrack_hash __read_mostly;
-static kmem_cache_t *ip_conntrack_cachep __read_mostly;
-static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid __read_mostly;
 static LIST_HEAD(unconfirmed);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 87c8f54872b..e5cd83b2205 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -720,10 +720,8 @@ snmp6_mib_free(void *ptr[2])
 {
         if (ptr == NULL)
                 return;
-        if (ptr[0])
-                free_percpu(ptr[0]);
-        if (ptr[1])
-                free_percpu(ptr[1]);
+        free_percpu(ptr[0]);
+        free_percpu(ptr[1]);
         ptr[0] = ptr[1] = NULL;
 }
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bf526115e51..96d8310ae9c 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -50,7 +50,7 @@
 
 struct rt6_statistics  rt6_stats;
 
-static kmem_cache_t * fib6_node_kmem __read_mostly;
+static struct kmem_cache * fib6_node_kmem __read_mostly;
 
 enum fib_walk_state_t
 {
@@ -150,7 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void)
 {
         struct fib6_node *fn;
 
-        if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL)
+        if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
                 memset(fn, 0, sizeof(struct fib6_node));
 
         return fn;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 01a5c52a2be..12e426b9aac 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -50,7 +50,7 @@ static u32 xfrm6_tunnel_spi;
 #define XFRM6_TUNNEL_SPI_MIN    1
 #define XFRM6_TUNNEL_SPI_MAX    0xffffffff
 
-static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
+static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
@@ -180,7 +180,7 @@ try_next_2:;
         spi = 0;
         goto out;
 alloc_spi:
-        x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
+        x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
         if (!x6spi)
                 goto out;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9b3158ce46e..9b02ec4012f 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -108,7 +108,7 @@ static struct {
         size_t size;
 
         /* slab cache pointer */
-        kmem_cache_t *cachep;
+        struct kmem_cache *cachep;
 
         /* allocated slab cache + modules which uses this slab cache */
         int use;
@@ -147,7 +147,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
 {
         int ret = 0;
         char *cache_name;
-        kmem_cache_t *cachep;
+        struct kmem_cache *cachep;
 
         DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
                features, name, size);
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
 /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
 void nf_conntrack_unregister_cache(u_int32_t features)
 {
-        kmem_cache_t *cachep;
+        struct kmem_cache *cachep;
         char *name;
 
         /*
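The af_inet6.c cleanup above leans on free_percpu() accepting a NULL pointer, mirroring kfree() semantics, which makes the per-pointer guards dead weight. The same idiom, sketched with an illustrative helper name:

static void demo_mib_free(void *ptr[2])
{
        free_percpu(ptr[0]);    /* NULL-safe, like kfree(NULL) */
        free_percpu(ptr[1]);
        ptr[0] = ptr[1] = NULL;
}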
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7df8f9a2f86..9cbf926cdd1 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -29,7 +29,7 @@
 LIST_HEAD(nf_conntrack_expect_list);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_list);
 
-kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
+struct kmem_cache *nf_conntrack_expect_cachep __read_mostly;
 static unsigned int nf_conntrack_expect_next_id;
 
 /* nf_conntrack_expect helper functions */
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a98de0b54d6..a5a6e192ac2 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -92,7 +92,7 @@ struct xt_hashlimit_htable {
 
 static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
 static DEFINE_MUTEX(hlimit_mutex);      /* additional checkentry protection */
 static HLIST_HEAD(hashlimit_htables);
-static kmem_cache_t *hashlimit_cachep __read_mostly;
+static struct kmem_cache *hashlimit_cachep __read_mostly;
 
 static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
 {
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
index dada34a77b2..49effd92144 100644
--- a/net/rxrpc/krxiod.c
+++ b/net/rxrpc/krxiod.c
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/freezer.h>
 #include <rxrpc/krxiod.h>
 #include <rxrpc/transport.h>
 #include <rxrpc/peer.h>
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index cea4eb5e249..3ab0f77409f 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -27,6 +27,7 @@
 #include <rxrpc/call.h>
 #include <linux/udp.h>
 #include <linux/ip.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include "internal.h"
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
index 3e7466900bd..9a9b6132dba 100644
--- a/net/rxrpc/krxtimod.c
+++ b/net/rxrpc/krxtimod.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include <rxrpc/rxrpc.h>
 #include <rxrpc/krxtimod.h>
 #include <asm/errno.h>
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 11f3b549f4a..f2ba8615895 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -79,8 +79,8 @@ static struct sctp_pf *sctp_pf_inet_specific;
 static struct sctp_af *sctp_af_v4_specific;
 static struct sctp_af *sctp_af_v6_specific;
 
-kmem_cache_t *sctp_chunk_cachep __read_mostly;
-kmem_cache_t *sctp_bucket_cachep __read_mostly;
+struct kmem_cache *sctp_chunk_cachep __read_mostly;
+struct kmem_cache *sctp_bucket_cachep __read_mostly;
 
 /* Return the address of the control sock. */
 struct sock *sctp_get_ctl_sock(void)
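The three rxrpc daemons above (and svcsock.c below) gain <linux/freezer.h> because try_to_freeze() moved out of <linux/sched.h> in this cycle. A hedged sketch of the loop shape such kernel daemons share; the stop condition and work body are illustrative, not taken from rxrpc:

#include <linux/freezer.h>

static int demo_should_stop(void);      /* hypothetical exit condition */

static int demo_daemon(void *arg)
{
        for (;;) {
                try_to_freeze();        /* park here across suspend/resume */
                if (demo_should_stop())
                        break;
                /* ... dequeue and process pending work ... */
                schedule();
        }
        return 0;
}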
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 04954e5f684..30927d3a597 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -65,7 +65,7 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-extern kmem_cache_t *sctp_chunk_cachep;
+extern struct kmem_cache *sctp_chunk_cachep;
 
 SCTP_STATIC
 struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
@@ -979,7 +979,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
 {
         struct sctp_chunk *retval;
 
-        retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC);
+        retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
 
         if (!retval)
                 goto nodata;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 02b27145b27..1e8132b8c4d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -107,7 +107,7 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
                               struct sctp_association *, sctp_socket_type_t);
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
-extern kmem_cache_t *sctp_bucket_cachep;
+extern struct kmem_cache *sctp_bucket_cachep;
 
 /* Get the sndbuf space available at the time on the association. */
 static inline int sctp_wspace(struct sctp_association *asoc)
@@ -4989,7 +4989,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 {
         struct sctp_bind_bucket *pp;
 
-        pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC);
+        pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
         SCTP_DBG_OBJCNT_INC(bind_bucket);
         if (pp) {
                 pp->port = snum;
diff --git a/net/socket.c b/net/socket.c
index e8db54702a6..29ea1de43ec 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -230,13 +230,13 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr,
 
 #define SOCKFS_MAGIC 0x534F434B
 
-static kmem_cache_t *sock_inode_cachep __read_mostly;
+static struct kmem_cache *sock_inode_cachep __read_mostly;
 
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
         struct socket_alloc *ei;
 
-        ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL);
+        ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
         if (!ei)
                 return NULL;
         init_waitqueue_head(&ei->socket.wait);
@@ -257,7 +257,7 @@ static void sock_destroy_inode(struct inode *inode)
                         container_of(inode, struct socket_alloc, vfs_inode));
 }
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
         struct socket_alloc *ei = (struct socket_alloc *)foo;
 
@@ -305,7 +305,14 @@ static struct file_system_type sock_fs_type = {
 
 static int sockfs_delete_dentry(struct dentry *dentry)
 {
-        return 1;
+        /*
+         * At creation time, we pretended this dentry was hashed
+         * (by clearing DCACHE_UNHASHED bit in d_flags)
+         * At delete time, we restore the truth : not hashed.
+         * (so that dput() can proceed correctly)
+         */
+        dentry->d_flags |= DCACHE_UNHASHED;
+        return 0;
 }
 static struct dentry_operations sockfs_dentry_operations = {
         .d_delete = sockfs_delete_dentry,
@@ -353,14 +360,20 @@ static int sock_attach_fd(struct socket *sock, struct file *file)
 
         this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
         this.name = name;
-        this.hash = SOCK_INODE(sock)->i_ino;
+        this.hash = 0;
 
         file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
         if (unlikely(!file->f_dentry))
                 return -ENOMEM;
         file->f_dentry->d_op = &sockfs_dentry_operations;
-        d_add(file->f_dentry, SOCK_INODE(sock));
+        /*
+         * We dont want to push this dentry into global dentry hash table.
+         * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
+         * This permits a working /proc/$pid/fd/XXX on sockets
+         */
+        file->f_dentry->d_flags &= ~DCACHE_UNHASHED;
+        d_instantiate(file->f_dentry, SOCK_INODE(sock));
         file->f_vfsmnt = mntget(sock_mnt);
         file->f_mapping = file->f_dentry->d_inode->i_mapping;
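The socket.c change above gives every socket a dentry that claims to be hashed (DCACHE_UNHASHED cleared) without ever entering the global dentry hash. That is what makes /proc/$pid/fd/N resolve for sockets while keeping them invisible to ordinary path lookup; ->d_delete then restores the truth so dput() can free the dentry. The pattern, sketched against the 2.6-era dcache API (everything except the flag and the d_* calls is made up):

static struct dentry *demo_pseudo_attach(struct dentry *root,
                                         struct qstr *name,
                                         struct inode *inode)
{
        struct dentry *dentry = d_alloc(root, name);

        if (!dentry)
                return NULL;
        /* Pretend to be hashed, but stay out of the dentry hash table */
        dentry->d_flags &= ~DCACHE_UNHASHED;
        d_instantiate(dentry, inode);
        return dentry;
}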
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 49dba5febbb..19703aa9659 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -33,7 +33,7 @@ static int rpc_mount_count;
 static struct file_system_type rpc_pipe_fs_type;
 
 
-static kmem_cache_t *rpc_inode_cachep __read_mostly;
+static struct kmem_cache *rpc_inode_cachep __read_mostly;
 
 #define RPC_UPCALL_TIMEOUT (30*HZ)
 
@@ -143,7 +143,7 @@ static struct inode *
 rpc_alloc_inode(struct super_block *sb)
 {
         struct rpc_inode *rpci;
-        rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
+        rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
         if (!rpci)
                 return NULL;
         return &rpci->vfs_inode;
@@ -824,7 +824,7 @@ static struct file_system_type rpc_pipe_fs_type = {
 };
 
 static void
-init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
         struct rpc_inode *rpci = (struct rpc_inode *) foo;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index eff44bcdc95..225e6510b52 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -34,8 +34,8 @@ static int                      rpc_task_id;
 #define RPC_BUFFER_MAXSIZE      (2048)
 #define RPC_BUFFER_POOLSIZE     (8)
 #define RPC_TASK_POOLSIZE       (8)
-static kmem_cache_t     *rpc_task_slabp __read_mostly;
-static kmem_cache_t     *rpc_buffer_slabp __read_mostly;
+static struct kmem_cache        *rpc_task_slabp __read_mostly;
+static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
 static mempool_t        *rpc_task_mempool __read_mostly;
 static mempool_t        *rpc_buffer_mempool __read_mostly;
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index ee9bb1522d5..c7bb5f7f21a 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -119,7 +119,8 @@ EXPORT_SYMBOL(svc_auth_unregister);
 #define DN_HASHMASK     (DN_HASHMAX-1)
 
 static struct hlist_head        auth_domain_table[DN_HASHMAX];
-static spinlock_t       auth_domain_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t       auth_domain_lock =
+        __SPIN_LOCK_UNLOCKED(auth_domain_lock);
 
 void auth_domain_put(struct auth_domain *dom)
 {
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 64ca1f61dd9..99f54fb6d66 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -84,6 +85,35 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req);
  */
 static int svc_conn_age_period = 6*60;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key svc_key[2];
+static struct lock_class_key svc_slock_key[2];
+
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+        struct sock *sk = sock->sk;
+        BUG_ON(sk->sk_lock.owner != NULL);
+        switch (sk->sk_family) {
+        case AF_INET:
+                sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
+                    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+                break;
+
+        case AF_INET6:
+                sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
+                    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+                break;
+
+        default:
+                BUG();
+        }
+}
+#else
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /*
  * Queue up an idle server thread.  Must have pool->sp_lock held.
  * Note: this is really a stack rather than a queue, so that we only
@@ -1556,6 +1586,8 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
         if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
                 return error;
 
+        svc_reclassify_socket(sock);
+
         if (type == SOCK_STREAM)
                 sock->sk->sk_reuse = 1; /* allow address reuse */
         error = kernel_bind(sock, (struct sockaddr *) sin,
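The svcauth.c one-liner above swaps the deprecated SPIN_LOCK_UNLOCKED initializer for __SPIN_LOCK_UNLOCKED(name), which gives lockdep a distinct class per lock instead of lumping every statically initialized spinlock together. DEFINE_SPINLOCK() is the equivalent shorthand when declaration and initialization coincide (demo names are illustrative):

/* Both forms register the lock as its own lockdep class */
static spinlock_t demo_lock = __SPIN_LOCK_UNLOCKED(demo_lock);
static DEFINE_SPINLOCK(demo_lock2);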
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index cfe3c15be94..2fc4a312326 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1058,6 +1058,35 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
         return err;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key xs_key[2];
+static struct lock_class_key xs_slock_key[2];
+
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+        struct sock *sk = sock->sk;
+        BUG_ON(sk->sk_lock.owner != NULL);
+        switch (sk->sk_family) {
+        case AF_INET:
+                sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS",
+                    &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]);
+                break;
+
+        case AF_INET6:
+                sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS",
+                    &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]);
+                break;
+
+        default:
+                BUG();
+        }
+}
+#else
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /**
  * xs_udp_connect_worker - set up a UDP socket
  * @work: RPC transport to connect
@@ -1081,6 +1110,7 @@ static void xs_udp_connect_worker(struct work_struct *work)
                 dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
                 goto out;
         }
+        xs_reclassify_socket(sock);
 
         if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                 sock_release(sock);
@@ -1165,6 +1195,7 @@ static void xs_tcp_connect_worker(struct work_struct *work)
                 dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
                 goto out;
         }
+        xs_reclassify_socket(sock);
 
         if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                 sock_release(sock);
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index ae6ddf00a1a..eb80778d6d9 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -42,7 +42,7 @@ struct queue_item {
         unsigned long data;
 };
 
-static kmem_cache_t *tipc_queue_item_cache;
+static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
 static int handler_enabled = 0;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e8198a2c785..414f8907038 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -12,7 +12,7 @@
 #include <net/ip.h>
 #include <net/xfrm.h>
 
-static kmem_cache_t *secpath_cachep __read_mostly;
+static struct kmem_cache *secpath_cachep __read_mostly;
 
 void __secpath_destroy(struct sec_path *sp)
 {
@@ -27,7 +27,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
 {
         struct sec_path *sp;
 
-        sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC);
+        sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
         if (!sp)
                 return NULL;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 140bb9b384a..bebd40e5a62 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -40,7 +40,7 @@ EXPORT_SYMBOL(xfrm_policy_count);
 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 
-static kmem_cache_t *xfrm_dst_cache __read_mostly;
+static struct kmem_cache *xfrm_dst_cache __read_mostly;
 static struct work_struct xfrm_policy_gc_work;
 static HLIST_HEAD(xfrm_policy_gc_list);