From 945cf2b6199be70ff03102b9e642c3bb05d01de9 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:18:33 +0000 Subject: mm/sl[aou]b: Extract a common function for kmem_cache_destroy kmem_cache_destroy does basically the same in all allocators. Extract common code which is easy since we already have common mutex handling. Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm/slab.h') diff --git a/mm/slab.h b/mm/slab.h index db7848caaa2..07a537ed5da 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -30,4 +30,7 @@ extern struct list_head slab_caches; struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +int __kmem_cache_shutdown(struct kmem_cache *); +void __kmem_cache_destroy(struct kmem_cache *); + #endif -- cgit v1.2.3-70-g09d2 From 9b030cb865f137e1574596983face2a07e41e8b2 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:20:33 +0000 Subject: mm/sl[aou]b: Use "kmem_cache" name for slab cache with kmem_cache struct Make all allocators use the "kmem_cache" slabname for the "kmem_cache" structure. Reviewed-by: Glauber Costa Reviewed-by: Joonsoo Kim Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 72 +++++++++++++++++++++++++++++--------------------------- mm/slab.h | 6 +++++ mm/slab_common.c | 1 + mm/slob.c | 8 +++++++ mm/slub.c | 2 -- 5 files changed, 52 insertions(+), 37 deletions(-) (limited to 'mm/slab.h') diff --git a/mm/slab.c b/mm/slab.c index 49a74b349e3..ef94799a1aa 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -578,9 +578,9 @@ static struct arraycache_init initarray_generic = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ -static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES]; -static struct kmem_cache cache_cache = { - .nodelists = cache_cache_nodelists, +static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES]; +static struct kmem_cache kmem_cache_boot = { + .nodelists = kmem_cache_nodelists, .batchcount = 1, .limit = BOOT_CPUCACHE_ENTRIES, .shared = 1, @@ -1594,15 +1594,17 @@ void __init kmem_cache_init(void) int order; int node; + kmem_cache = &kmem_cache_boot; + if (num_possible_nodes() == 1) use_alien_caches = 0; for (i = 0; i < NUM_INIT_LISTS; i++) { kmem_list3_init(&initkmem_list3[i]); if (i < MAX_NUMNODES) - cache_cache.nodelists[i] = NULL; + kmem_cache->nodelists[i] = NULL; } - set_up_list3s(&cache_cache, CACHE_CACHE); + set_up_list3s(kmem_cache, CACHE_CACHE); /* * Fragmentation resistance on low memory - only use bigger @@ -1614,9 +1616,9 @@ void __init kmem_cache_init(void) /* Bootstrap is tricky, because several objects are allocated * from caches that do not exist yet: - * 1) initialize the cache_cache cache: it contains the struct - * kmem_cache structures of all caches, except cache_cache itself: - * cache_cache is statically allocated. + * 1) initialize the kmem_cache cache: it contains the struct + * kmem_cache structures of all caches, except kmem_cache itself: + * kmem_cache is statically allocated. * Initially an __init data area is used for the head array and the * kmem_list3 structures, it's replaced with a kmalloc allocated * array at the end of the bootstrap. @@ -1625,43 +1627,43 @@ void __init kmem_cache_init(void) * An __init data area is used for the head array. * 3) Create the remaining kmalloc caches, with minimally sized * head arrays. 
- * 4) Replace the __init data head arrays for cache_cache and the first + * 4) Replace the __init data head arrays for kmem_cache and the first * kmalloc cache with kmalloc allocated arrays. - * 5) Replace the __init data for kmem_list3 for cache_cache and + * 5) Replace the __init data for kmem_list3 for kmem_cache and * the other cache's with kmalloc allocated memory. * 6) Resize the head arrays of the kmalloc caches to their final sizes. */ node = numa_mem_id(); - /* 1) create the cache_cache */ + /* 1) create the kmem_cache */ INIT_LIST_HEAD(&slab_caches); - list_add(&cache_cache.list, &slab_caches); - cache_cache.colour_off = cache_line_size(); - cache_cache.array[smp_processor_id()] = &initarray_cache.cache; - cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; + list_add(&kmem_cache->list, &slab_caches); + kmem_cache->colour_off = cache_line_size(); + kmem_cache->array[smp_processor_id()] = &initarray_cache.cache; + kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; /* * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids */ - cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + + kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + nr_node_ids * sizeof(struct kmem_list3 *); - cache_cache.object_size = cache_cache.size; - cache_cache.size = ALIGN(cache_cache.size, + kmem_cache->object_size = kmem_cache->size; + kmem_cache->size = ALIGN(kmem_cache->object_size, cache_line_size()); - cache_cache.reciprocal_buffer_size = - reciprocal_value(cache_cache.size); + kmem_cache->reciprocal_buffer_size = + reciprocal_value(kmem_cache->size); for (order = 0; order < MAX_ORDER; order++) { - cache_estimate(order, cache_cache.size, - cache_line_size(), 0, &left_over, &cache_cache.num); - if (cache_cache.num) + cache_estimate(order, kmem_cache->size, + cache_line_size(), 0, &left_over, &kmem_cache->num); + if (kmem_cache->num) break; } - BUG_ON(!cache_cache.num); - cache_cache.gfporder = order; - cache_cache.colour = left_over / cache_cache.colour_off; - cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + + BUG_ON(!kmem_cache->num); + kmem_cache->gfporder = order; + kmem_cache->colour = left_over / kmem_cache->colour_off; + kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) + sizeof(struct slab), cache_line_size()); /* 2+3) create the kmalloc caches */ @@ -1728,15 +1730,15 @@ void __init kmem_cache_init(void) ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); - memcpy(ptr, cpu_cache_get(&cache_cache), + BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache); + memcpy(ptr, cpu_cache_get(kmem_cache), sizeof(struct arraycache_init)); /* * Do not assume that spinlocks can be initialized via memcpy: */ spin_lock_init(&ptr->lock); - cache_cache.array[smp_processor_id()] = ptr; + kmem_cache->array[smp_processor_id()] = ptr; ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); @@ -1757,7 +1759,7 @@ void __init kmem_cache_init(void) int nid; for_each_online_node(nid) { - init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid); + init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid); init_list(malloc_sizes[INDEX_AC].cs_cachep, &initkmem_list3[SIZE_AC + nid], nid); @@ -2223,7 +2225,7 @@ void __kmem_cache_destroy(struct kmem_cache *cachep) kfree(l3); } } - kmem_cache_free(&cache_cache, cachep); + kmem_cache_free(kmem_cache, cachep); } @@ -2473,7 +2475,7 @@ __kmem_cache_create (const char *name, size_t 
size, size_t align, gfp = GFP_NOWAIT; /* Get cache's description obj. */ - cachep = kmem_cache_zalloc(&cache_cache, gfp); + cachep = kmem_cache_zalloc(kmem_cache, gfp); if (!cachep) return NULL; @@ -2531,7 +2533,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, if (!cachep->num) { printk(KERN_ERR "kmem_cache_create: couldn't create cache %s.\n", name); - kmem_cache_free(&cache_cache, cachep); + kmem_cache_free(kmem_cache, cachep); return NULL; } slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) @@ -3299,7 +3301,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) { - if (cachep == &cache_cache) + if (cachep == kmem_cache) return false; return should_failslab(cachep->object_size, flags, cachep->flags); diff --git a/mm/slab.h b/mm/slab.h index 07a537ed5da..6724aa6f662 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -25,8 +25,14 @@ extern enum slab_state slab_state; /* The slab cache mutex protects the management structures during changes */ extern struct mutex slab_mutex; + +/* The list of all slab caches on the system */ extern struct list_head slab_caches; +/* The slab cache that manages slab cache information */ +extern struct kmem_cache *kmem_cache; + +/* Functions provided by the slab allocators */ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); diff --git a/mm/slab_common.c b/mm/slab_common.c index a1c4f0b5aae..5374150f548 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -22,6 +22,7 @@ enum slab_state slab_state; LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); +struct kmem_cache *kmem_cache; #ifdef CONFIG_DEBUG_VM static int kmem_cache_sanity_check(const char *name, size_t size) diff --git a/mm/slob.c b/mm/slob.c index 289be4f4681..7d272c3dcc0 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -622,8 +622,16 @@ int kmem_cache_shrink(struct kmem_cache *d) } EXPORT_SYMBOL(kmem_cache_shrink); +struct kmem_cache kmem_cache_boot = { + .name = "kmem_cache", + .size = sizeof(struct kmem_cache), + .flags = SLAB_PANIC, + .align = ARCH_KMALLOC_MINALIGN, +}; + void __init kmem_cache_init(void) { + kmem_cache = &kmem_cache_boot; slab_state = UP; } diff --git a/mm/slub.c b/mm/slub.c index 724adea3438..e0d1e047030 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3221,8 +3221,6 @@ void __kmem_cache_destroy(struct kmem_cache *s) struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; EXPORT_SYMBOL(kmalloc_caches); -static struct kmem_cache *kmem_cache; - #ifdef CONFIG_ZONE_DMA static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; #endif -- cgit v1.2.3-70-g09d2 From 12c3667fb780e20360ad0bde32dfb3591ef609ad Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:38:33 +0000 Subject: mm/sl[aou]b: Get rid of __kmem_cache_destroy What is done there can be done in __kmem_cache_shutdown. This affects RCU handling somewhat. On rcu free all slab allocators do not refer to other management structures than the kmem_cache structure. Therefore these other structures can be freed before the rcu deferred free to the page allocator occurs. 
Reviewed-by: Joonsoo Kim Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 46 +++++++++++++++++++++------------------------- mm/slab.h | 1 - mm/slab_common.c | 1 - mm/slob.c | 4 ---- mm/slub.c | 10 +++++----- 5 files changed, 26 insertions(+), 36 deletions(-) (limited to 'mm/slab.h') diff --git a/mm/slab.c b/mm/slab.c index 8ca6ec6301f..de961b48a6a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2208,26 +2208,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) } } -void __kmem_cache_destroy(struct kmem_cache *cachep) -{ - int i; - struct kmem_list3 *l3; - - for_each_online_cpu(i) - kfree(cachep->array[i]); - - /* NUMA: free the list3 structures */ - for_each_online_node(i) { - l3 = cachep->nodelists[i]; - if (l3) { - kfree(l3->shared); - free_alien_cache(l3->alien); - kfree(l3); - } - } -} - - /** * calculate_slab_order - calculate size (page order) of slabs * @cachep: pointer to the cache that is being created @@ -2364,9 +2344,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) * Cannot be called within a int, but can be interrupted. * The @ctor is run when new pages are allocated by the cache. * - * @name must be valid until the cache is destroyed. This implies that - * the module calling this has to destroy the cache before getting unloaded. - * * The flags are * * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) @@ -2591,7 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, cachep->refcount = 1; if (setup_cpu_cache(cachep, gfp)) { - __kmem_cache_destroy(cachep); + __kmem_cache_shutdown(cachep); return NULL; } @@ -2766,7 +2743,26 @@ EXPORT_SYMBOL(kmem_cache_shrink); int __kmem_cache_shutdown(struct kmem_cache *cachep) { - return __cache_shrink(cachep); + int i; + struct kmem_list3 *l3; + int rc = __cache_shrink(cachep); + + if (rc) + return rc; + + for_each_online_cpu(i) + kfree(cachep->array[i]); + + /* NUMA: free the list3 structures */ + for_each_online_node(i) { + l3 = cachep->nodelists[i]; + if (l3) { + kfree(l3->shared); + free_alien_cache(l3->alien); + kfree(l3); + } + } + return 0; } /* diff --git a/mm/slab.h b/mm/slab.h index 6724aa6f662..c4f9a361bd1 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -37,6 +37,5 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); int __kmem_cache_shutdown(struct kmem_cache *); -void __kmem_cache_destroy(struct kmem_cache *); #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index d6deae9108c..7df814e8fbe 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -153,7 +153,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->flags & SLAB_DESTROY_BY_RCU) rcu_barrier(); - __kmem_cache_destroy(s); kmem_cache_free(kmem_cache, s); } else { list_add(&s->list, &slab_caches); diff --git a/mm/slob.c b/mm/slob.c index cb4ab967529..50f60532270 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -538,10 +538,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, return c; } -void __kmem_cache_destroy(struct kmem_cache *c) -{ -} - void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) { void *b; diff --git a/mm/slub.c b/mm/slub.c index 6f932f7a821..e5e09873f5e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3205,12 +3205,12 @@ static inline int kmem_cache_close(struct kmem_cache *s) int __kmem_cache_shutdown(struct kmem_cache *s) { - return kmem_cache_close(s); -} + int rc = kmem_cache_close(s); -void __kmem_cache_destroy(struct 
kmem_cache *s) -{ - sysfs_slab_remove(s); + if (!rc) + sysfs_slab_remove(s); + + return rc; } /******************************************************************** -- cgit v1.2.3-70-g09d2 From cbb79694d592e9a76880f6ef6db8feccaeee1c32 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:18:32 +0000 Subject: mm/sl[aou]b: Do slab aliasing call from common code The slab aliasing logic causes some strange contortions in slub. So add a call to deal with aliases to slab_common.c but disable it for other slab allocators by providng stubs that fail to create aliases. Full general support for aliases will require additional cleanup passes and more standardization of fields in kmem_cache. Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.h | 10 ++++++++++ mm/slab_common.c | 4 ++++ mm/slub.c | 15 +++++++++++---- 3 files changed, 25 insertions(+), 4 deletions(-) (limited to 'mm/slab.h') diff --git a/mm/slab.h b/mm/slab.h index c4f9a361bd1..84c28f451d2 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -36,6 +36,16 @@ extern struct kmem_cache *kmem_cache; struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +#ifdef CONFIG_SLUB +struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)); +#else +static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)) +{ return NULL; } +#endif + + int __kmem_cache_shutdown(struct kmem_cache *); #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index f18c06fd97c..adc42b01b25 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -115,6 +115,10 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align goto out_locked; } + s = __kmem_cache_alias(name, size, align, flags, ctor); + if (s) + goto out_locked; + s = __kmem_cache_create(n, size, align, flags, ctor); if (s) { diff --git a/mm/slub.c b/mm/slub.c index 91c9a2fe676..64d445e7a27 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3708,7 +3708,7 @@ void __init kmem_cache_init(void) slub_max_order = 0; kmem_size = offsetof(struct kmem_cache, node) + - nr_node_ids * sizeof(struct kmem_cache_node *); + nr_node_ids * sizeof(struct kmem_cache_node *); /* Allocate two kmem_caches from the page allocator */ kmalloc_size = ALIGN(kmem_size, cache_line_size()); @@ -3922,7 +3922,7 @@ static struct kmem_cache *find_mergeable(size_t size, return NULL; } -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, +struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *s; @@ -3939,11 +3939,18 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, if (sysfs_slab_alias(s, name)) { s->refcount--; - return NULL; + s = NULL; } - return s; } + return s; +} + +struct kmem_cache *__kmem_cache_create(const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)) +{ + struct kmem_cache *s; + s = kmem_cache_alloc(kmem_cache, GFP_KERNEL); if (s) { if (kmem_cache_open(s, name, -- cgit v1.2.3-70-g09d2 From 96d17b7be0a9849d381442030886211dbb2a7061 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:18:32 +0000 Subject: mm/sl[aou]b: Move sysfs_slab_add to common Simplify locking by moving the slab_add_sysfs after all locks have been dropped. 
Eases the upcoming move to provide sysfs support for all allocators. Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.h | 3 +++ mm/slab_common.c | 8 ++++++++ mm/slub.c | 15 ++------------- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'mm/slab.h') diff --git a/mm/slab.h b/mm/slab.h index 84c28f451d2..ec7b94429b9 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -39,10 +39,13 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +extern int sysfs_slab_add(struct kmem_cache *s); #else static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { return NULL; } +static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } + #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index adc42b01b25..4f722084bae 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -152,6 +152,14 @@ out_locked: return NULL; } + if (s->refcount == 1) { + err = sysfs_slab_add(s); + if (err) + printk(KERN_WARNING "kmem_cache_create(%s) failed to" + " create sysfs entry. Error %d\n", + name, err); + } + return s; } EXPORT_SYMBOL(kmem_cache_create); diff --git a/mm/slub.c b/mm/slub.c index 64d445e7a27..8d00fd78df2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -202,12 +202,10 @@ struct track { enum track_item { TRACK_ALLOC, TRACK_FREE }; #ifdef CONFIG_SYSFS -static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); static void sysfs_slab_remove(struct kmem_cache *); #else -static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } static inline void sysfs_slab_remove(struct kmem_cache *s) { } @@ -3955,16 +3953,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, if (s) { if (kmem_cache_open(s, name, size, align, flags, ctor)) { - int r; - - mutex_unlock(&slab_mutex); - r = sysfs_slab_add(s); - mutex_lock(&slab_mutex); - - if (!r) - return s; - - kmem_cache_close(s); + return s; } kmem_cache_free(kmem_cache, s); } @@ -5258,7 +5247,7 @@ static char *create_unique_id(struct kmem_cache *s) return name; } -static int sysfs_slab_add(struct kmem_cache *s) +int sysfs_slab_add(struct kmem_cache *s) { int err; const char *name; -- cgit v1.2.3-70-g09d2 From 278b1bb1313664d4999a7f7d47a8a8d964862d02 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:20:34 +0000 Subject: mm/sl[aou]b: Move kmem_cache allocations into common code Shift the allocations to common code. That way the allocation and freeing of the kmem_cache structures is handled by common code. Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 34 ++++++++++++++++------------------ mm/slab.h | 4 ++-- mm/slab_common.c | 18 ++++++++++-------- mm/slob.c | 42 +++++++++++++++++------------------------- mm/slub.c | 24 +++++++----------------- 5 files changed, 52 insertions(+), 70 deletions(-) (limited to 'mm/slab.h') diff --git a/mm/slab.c b/mm/slab.c index de961b48a6a..abc83334e5f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1676,7 +1676,8 @@ void __init kmem_cache_init(void) * bug. 
*/ - sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name, + sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name, sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, @@ -1684,8 +1685,8 @@ void __init kmem_cache_init(void) list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); if (INDEX_AC != INDEX_L3) { - sizes[INDEX_L3].cs_cachep = - __kmem_cache_create(names[INDEX_L3].name, + sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name, sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, @@ -1704,7 +1705,8 @@ void __init kmem_cache_init(void) * allow tighter packing of the smaller caches. */ if (!sizes->cs_cachep) { - sizes->cs_cachep = __kmem_cache_create(names->name, + sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes->cs_cachep, names->name, sizes->cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, @@ -1712,7 +1714,8 @@ void __init kmem_cache_init(void) list_add(&sizes->cs_cachep->list, &slab_caches); } #ifdef CONFIG_ZONE_DMA - sizes->cs_dmacachep = __kmem_cache_create( + sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes->cs_dmacachep, names->name_dma, sizes->cs_size, ARCH_KMALLOC_MINALIGN, @@ -2356,13 +2359,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) * cacheline. This can be beneficial if you're counting cycles as closely * as davem. */ -struct kmem_cache * -__kmem_cache_create (const char *name, size_t size, size_t align, +int +__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { size_t left_over, slab_size, ralign; - struct kmem_cache *cachep = NULL; gfp_t gfp; + int err; #if DEBUG #if FORCED_DEBUG @@ -2450,11 +2453,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align, else gfp = GFP_NOWAIT; - /* Get cache's description obj. 
*/ - cachep = kmem_cache_zalloc(kmem_cache, gfp); - if (!cachep) - return NULL; - cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; cachep->object_size = size; cachep->align = align; @@ -2509,8 +2507,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, if (!cachep->num) { printk(KERN_ERR "kmem_cache_create: couldn't create cache %s.\n", name); - kmem_cache_free(kmem_cache, cachep); - return NULL; + return -E2BIG; } slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab), align); @@ -2567,9 +2564,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align, cachep->name = name; cachep->refcount = 1; - if (setup_cpu_cache(cachep, gfp)) { + err = setup_cpu_cache(cachep, gfp); + if (err) { __kmem_cache_shutdown(cachep); - return NULL; + return err; } if (flags & SLAB_DEBUG_OBJECTS) { @@ -2582,7 +2580,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, slab_set_debugobj_lock_classes(cachep); } - return cachep; + return 0; } #if DEBUG diff --git a/mm/slab.h b/mm/slab.h index ec7b94429b9..077b07a24ef 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -33,8 +33,8 @@ extern struct list_head slab_caches; extern struct kmem_cache *kmem_cache; /* Functions provided by the slab allocators */ -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)); +extern int __kmem_cache_create(struct kmem_cache *, const char *name, + size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, diff --git a/mm/slab_common.c b/mm/slab_common.c index 4f722084bae..f50d2ed4fbf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -119,19 +119,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align if (s) goto out_locked; - s = __kmem_cache_create(n, size, align, flags, ctor); - + s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (s) { - /* - * Check if the slab has actually been created and if it was a - * real instatiation. Aliases do not belong on the list - */ - if (s->refcount == 1) + err = __kmem_cache_create(s, n, size, align, flags, ctor); + if (!err) + list_add(&s->list, &slab_caches); + else { + kfree(n); + kmem_cache_free(kmem_cache, s); + } + } else { kfree(n); - err = -ENOSYS; /* Until __kmem_cache_create returns code */ + err = -ENOMEM; } out_locked: diff --git a/mm/slob.c b/mm/slob.c index 50f60532270..9b0cee1e847 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -508,34 +508,26 @@ size_t ksize(const void *block) } EXPORT_SYMBOL(ksize); -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, +int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { - struct kmem_cache *c; - - c = slob_alloc(sizeof(struct kmem_cache), - GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); - - if (c) { - c->name = name; - c->size = size; - if (flags & SLAB_DESTROY_BY_RCU) { - /* leave room for rcu footer at the end of object */ - c->size += sizeof(struct slob_rcu); - } - c->flags = flags; - c->ctor = ctor; - /* ignore alignment unless it's forced */ - c->align = (flags & SLAB_HWCACHE_ALIGN) ? 
SLOB_ALIGN : 0; - if (c->align < ARCH_SLAB_MINALIGN) - c->align = ARCH_SLAB_MINALIGN; - if (c->align < align) - c->align = align; - - kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL); - c->refcount = 1; + c->name = name; + c->size = size; + if (flags & SLAB_DESTROY_BY_RCU) { + /* leave room for rcu footer at the end of object */ + c->size += sizeof(struct slob_rcu); } - return c; + c->flags = flags; + c->ctor = ctor; + /* ignore alignment unless it's forced */ + c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; + if (c->align < ARCH_SLAB_MINALIGN) + c->align = ARCH_SLAB_MINALIGN; + if (c->align < align) + c->align = align; + + c->refcount = 1; + return 0; } void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) diff --git a/mm/slub.c b/mm/slub.c index 8d00fd78df2..0ad3fffc7d2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3034,7 +3034,6 @@ static int kmem_cache_open(struct kmem_cache *s, size_t align, unsigned long flags, void (*ctor)(void *)) { - memset(s, 0, kmem_size); s->name = name; s->ctor = ctor; s->object_size = size; @@ -3109,7 +3108,7 @@ static int kmem_cache_open(struct kmem_cache *s, goto error; if (alloc_kmem_cache_cpus(s)) - return 1; + return 0; free_kmem_cache_nodes(s); error: @@ -3118,7 +3117,7 @@ error: "order=%u offset=%u flags=%lx\n", s->name, (unsigned long)size, s->size, oo_order(s->oo), s->offset, flags); - return 0; + return -EINVAL; } /* @@ -3260,13 +3259,13 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name, { struct kmem_cache *s; - s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); + s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); /* * This function is called with IRQs disabled during early-boot on * single CPU so there's no need to take slab_mutex here. */ - if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, + if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, flags, NULL)) goto panic; @@ -3944,20 +3943,11 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, return s; } -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, +int __kmem_cache_create(struct kmem_cache *s, + const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { - struct kmem_cache *s; - - s = kmem_cache_alloc(kmem_cache, GFP_KERNEL); - if (s) { - if (kmem_cache_open(s, name, - size, align, flags, ctor)) { - return s; - } - kmem_cache_free(kmem_cache, s); - } - return NULL; + return kmem_cache_open(s, name, size, align, flags, ctor); } #ifdef CONFIG_SMP -- cgit v1.2.3-70-g09d2 From 8a13a4cc80bb25c9eab2e7e56bab724fcfa55fce Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:18:33 +0000 Subject: mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists Do the initial settings of the fields in common code. This will allow us to push more processing into common code later and improve readability. 
Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 73 +++++++++++++++++++++++++------------------------------- mm/slab.h | 3 +-- mm/slab_common.c | 26 ++++++++++---------- mm/slob.c | 8 +++---- mm/slub.c | 39 ++++++++++++++---------------- 5 files changed, 68 insertions(+), 81 deletions(-) (limited to 'mm/slab.h') diff --git a/mm/slab.c b/mm/slab.c index abc83334e5f..f1f6d54e129 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1677,20 +1677,20 @@ void __init kmem_cache_init(void) */ sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - __kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name, - sizes[INDEX_AC].cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL); - + sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name; + sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size; + sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size; + sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN; + __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); + if (INDEX_AC != INDEX_L3) { sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - __kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name, - sizes[INDEX_L3].cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL); + sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name; + sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size; + sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size; + sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN; + __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches); } @@ -1706,22 +1706,21 @@ void __init kmem_cache_init(void) */ if (!sizes->cs_cachep) { sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - __kmem_cache_create(sizes->cs_cachep, names->name, - sizes->cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL); + sizes->cs_cachep->name = names->name; + sizes->cs_cachep->size = sizes->cs_size; + sizes->cs_cachep->object_size = sizes->cs_size; + sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN; + __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); list_add(&sizes->cs_cachep->list, &slab_caches); } #ifdef CONFIG_ZONE_DMA sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + sizes->cs_dmacachep->name = names->name_dma; + sizes->cs_dmacachep->size = sizes->cs_size; + sizes->cs_dmacachep->object_size = sizes->cs_size; + sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN; __kmem_cache_create(sizes->cs_dmacachep, - names->name_dma, - sizes->cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| - SLAB_PANIC, - NULL); + ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC); list_add(&sizes->cs_dmacachep->list, &slab_caches); #endif sizes++; @@ -2360,12 +2359,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) * as davem. 
*/ int -__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align, - unsigned long flags, void (*ctor)(void *)) +__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) { size_t left_over, slab_size, ralign; gfp_t gfp; int err; + size_t size = cachep->size; #if DEBUG #if FORCED_DEBUG @@ -2437,8 +2436,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s ralign = ARCH_SLAB_MINALIGN; } /* 3) caller mandated alignment */ - if (ralign < align) { - ralign = align; + if (ralign < cachep->align) { + ralign = cachep->align; } /* disable debug if necessary */ if (ralign > __alignof__(unsigned long long)) @@ -2446,7 +2445,7 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s /* * 4) Store it. */ - align = ralign; + cachep->align = ralign; if (slab_is_available()) gfp = GFP_KERNEL; @@ -2454,8 +2453,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s gfp = GFP_NOWAIT; cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; - cachep->object_size = size; - cachep->align = align; #if DEBUG /* @@ -2500,17 +2497,15 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s */ flags |= CFLGS_OFF_SLAB; - size = ALIGN(size, align); + size = ALIGN(size, cachep->align); - left_over = calculate_slab_order(cachep, size, align, flags); + left_over = calculate_slab_order(cachep, size, cachep->align, flags); - if (!cachep->num) { - printk(KERN_ERR - "kmem_cache_create: couldn't create cache %s.\n", name); + if (!cachep->num) return -E2BIG; - } + slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) - + sizeof(struct slab), align); + + sizeof(struct slab), cachep->align); /* * If the slab has been placed off-slab, and we have enough space then @@ -2538,8 +2533,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s cachep->colour_off = cache_line_size(); /* Offset must be a multiple of the alignment. 
*/ - if (cachep->colour_off < align) - cachep->colour_off = align; + if (cachep->colour_off < cachep->align) + cachep->colour_off = cachep->align; cachep->colour = left_over / cachep->colour_off; cachep->slab_size = slab_size; cachep->flags = flags; @@ -2560,8 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s */ BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); } - cachep->ctor = ctor; - cachep->name = name; cachep->refcount = 1; err = setup_cpu_cache(cachep, gfp); diff --git a/mm/slab.h b/mm/slab.h index 077b07a24ef..67aeaa2d39c 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -33,8 +33,7 @@ extern struct list_head slab_caches; extern struct kmem_cache *kmem_cache; /* Functions provided by the slab allocators */ -extern int __kmem_cache_create(struct kmem_cache *, const char *name, - size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, diff --git a/mm/slab_common.c b/mm/slab_common.c index f50d2ed4fbf..8a85a19d90e 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -100,7 +100,6 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align { struct kmem_cache *s = NULL; int err = 0; - char *n; get_online_cpus(); mutex_lock(&slab_mutex); @@ -109,32 +108,33 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align goto out_locked; - n = kstrdup(name, GFP_KERNEL); - if (!n) { - err = -ENOMEM; - goto out_locked; - } - s = __kmem_cache_alias(name, size, align, flags, ctor); if (s) goto out_locked; s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (s) { - err = __kmem_cache_create(s, n, size, align, flags, ctor); + s->object_size = s->size = size; + s->align = align; + s->ctor = ctor; + s->name = kstrdup(name, GFP_KERNEL); + if (!s->name) { + kmem_cache_free(kmem_cache, s); + err = -ENOMEM; + goto out_locked; + } + + err = __kmem_cache_create(s, flags); if (!err) list_add(&s->list, &slab_caches); else { - kfree(n); + kfree(s->name); kmem_cache_free(kmem_cache, s); } - - } else { - kfree(n); + } else err = -ENOMEM; - } out_locked: mutex_unlock(&slab_mutex); diff --git a/mm/slob.c b/mm/slob.c index 9b0cee1e847..cac05d92f32 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -508,17 +508,15 @@ size_t ksize(const void *block) } EXPORT_SYMBOL(ksize); -int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)) +int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) { - c->name = name; - c->size = size; + size_t align = c->size; + if (flags & SLAB_DESTROY_BY_RCU) { /* leave room for rcu footer at the end of object */ c->size += sizeof(struct slob_rcu); } c->flags = flags; - c->ctor = ctor; /* ignore alignment unless it's forced */ c->align = (flags & SLAB_HWCACHE_ALIGN) ? 
SLOB_ALIGN : 0; if (c->align < ARCH_SLAB_MINALIGN) diff --git a/mm/slub.c b/mm/slub.c index 0ad3fffc7d2..d8ee419d5a1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3029,16 +3029,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) } -static int kmem_cache_open(struct kmem_cache *s, - const char *name, size_t size, - size_t align, unsigned long flags, - void (*ctor)(void *)) +static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) { - s->name = name; - s->ctor = ctor; - s->object_size = size; - s->align = align; - s->flags = kmem_cache_flags(size, flags, name, ctor); + s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); s->reserved = 0; if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) @@ -3115,7 +3108,7 @@ error: if (flags & SLAB_PANIC) panic("Cannot create slab %s size=%lu realsize=%u " "order=%u offset=%u flags=%lx\n", - s->name, (unsigned long)size, s->size, oo_order(s->oo), + s->name, (unsigned long)s->size, s->size, oo_order(s->oo), s->offset, flags); return -EINVAL; } @@ -3261,12 +3254,15 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name, s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + s->name = name; + s->size = s->object_size = size; + s->align = ARCH_KMALLOC_MINALIGN; + /* * This function is called with IRQs disabled during early-boot on * single CPU so there's no need to take slab_mutex here. */ - if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, - flags, NULL)) + if (kmem_cache_open(s, flags)) goto panic; list_add(&s->list, &slab_caches); @@ -3719,9 +3715,10 @@ void __init kmem_cache_init(void) */ kmem_cache_node = (void *)kmem_cache + kmalloc_size; - kmem_cache_open(kmem_cache_node, "kmem_cache_node", - sizeof(struct kmem_cache_node), - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + kmem_cache_node->name = "kmem_cache_node"; + kmem_cache_node->size = kmem_cache_node->object_size = + sizeof(struct kmem_cache_node); + kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC); hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); @@ -3729,8 +3726,10 @@ void __init kmem_cache_init(void) slab_state = PARTIAL; temp_kmem_cache = kmem_cache; - kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + kmem_cache->name = "kmem_cache"; + kmem_cache->size = kmem_cache->object_size = kmem_size; + kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); memcpy(kmem_cache, temp_kmem_cache, kmem_size); @@ -3943,11 +3942,9 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, return s; } -int __kmem_cache_create(struct kmem_cache *s, - const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)) +int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) { - return kmem_cache_open(s, name, size, align, flags, ctor); + return kmem_cache_open(s, flags); } #ifdef CONFIG_SMP -- cgit v1.2.3-70-g09d2 From aac3a1664aba429f47c70edfc76ee10fcd808471 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 5 Sep 2012 12:07:44 +0300 Subject: Revert "mm/sl[aou]b: Move sysfs_slab_add to common" This reverts commit 96d17b7be0a9849d381442030886211dbb2a7061 which caused the following errors at boot: [ 1.114885] kobject (ffff88001a802578): tried to init an initialized object, something is seriously wrong. 
[ 1.114885] Pid: 1, comm: swapper/0 Tainted: G W 3.6.0-rc1+ #6 [ 1.114885] Call Trace: [ 1.114885] [] kobject_init+0x87/0xa0 [ 1.115555] [] kobject_init_and_add+0x2a/0x90 [ 1.115555] [] ? sprintf+0x40/0x50 [ 1.115555] [] sysfs_slab_add+0x80/0x210 [ 1.115555] [] kmem_cache_create+0xa5/0x250 [ 1.115555] [] ? md_init+0x144/0x144 [ 1.115555] [] local_init+0xa4/0x11b [ 1.115555] [] dm_init+0x14/0x45 [ 1.115836] [] do_one_initcall+0x3a/0x160 [ 1.116834] [] kernel_init+0x133/0x1b7 [ 1.117835] [] ? do_early_param+0x86/0x86 [ 1.117835] [] kernel_thread_helper+0x4/0x10 [ 1.118401] [] ? start_kernel+0x33f/0x33f [ 1.119832] [] ? gs_change+0xb/0xb [ 1.120325] ------------[ cut here ]------------ [ 1.120835] WARNING: at fs/sysfs/dir.c:536 sysfs_add_one+0xc1/0xf0() [ 1.121437] sysfs: cannot create duplicate filename '/kernel/slab/:t-0000016' [ 1.121831] Modules linked in: [ 1.122138] Pid: 1, comm: swapper/0 Tainted: G W 3.6.0-rc1+ #6 [ 1.122831] Call Trace: [ 1.123074] [] ? sysfs_add_one+0xc1/0xf0 [ 1.123833] [] warn_slowpath_common+0x7a/0xb0 [ 1.124405] [] warn_slowpath_fmt+0x41/0x50 [ 1.124832] [] sysfs_add_one+0xc1/0xf0 [ 1.125337] [] create_dir+0x73/0xd0 [ 1.125832] [] sysfs_create_dir+0x81/0xe0 [ 1.126363] [] kobject_add_internal+0x9d/0x210 [ 1.126832] [] kobject_init_and_add+0x63/0x90 [ 1.127406] [] sysfs_slab_add+0x80/0x210 [ 1.127832] [] kmem_cache_create+0xa5/0x250 [ 1.128384] [] ? md_init+0x144/0x144 [ 1.128833] [] local_init+0xa4/0x11b [ 1.129831] [] dm_init+0x14/0x45 [ 1.130305] [] do_one_initcall+0x3a/0x160 [ 1.130831] [] kernel_init+0x133/0x1b7 [ 1.131351] [] ? do_early_param+0x86/0x86 [ 1.131830] [] kernel_thread_helper+0x4/0x10 [ 1.132392] [] ? start_kernel+0x33f/0x33f [ 1.132830] [] ? gs_change+0xb/0xb [ 1.133315] ---[ end trace 2703540871c8fab7 ]--- [ 1.133830] ------------[ cut here ]------------ [ 1.134274] WARNING: at lib/kobject.c:196 kobject_add_internal+0x1f5/0x210() [ 1.134829] kobject_add_internal failed for :t-0000016 with -EEXIST, don't try to register things with the same name in the same directory. [ 1.135829] Modules linked in: [ 1.136135] Pid: 1, comm: swapper/0 Tainted: G W 3.6.0-rc1+ #6 [ 1.136828] Call Trace: [ 1.137071] [] ? kobject_add_internal+0x1f5/0x210 [ 1.137830] [] warn_slowpath_common+0x7a/0xb0 [ 1.138402] [] warn_slowpath_fmt+0x41/0x50 [ 1.138830] [] ? release_sysfs_dirent+0x73/0xf0 [ 1.139419] [] kobject_add_internal+0x1f5/0x210 [ 1.139830] [] kobject_init_and_add+0x63/0x90 [ 1.140429] [] sysfs_slab_add+0x80/0x210 [ 1.140830] [] kmem_cache_create+0xa5/0x250 [ 1.141829] [] ? md_init+0x144/0x144 [ 1.142307] [] local_init+0xa4/0x11b [ 1.142829] [] dm_init+0x14/0x45 [ 1.143307] [] do_one_initcall+0x3a/0x160 [ 1.143829] [] kernel_init+0x133/0x1b7 [ 1.144352] [] ? do_early_param+0x86/0x86 [ 1.144829] [] kernel_thread_helper+0x4/0x10 [ 1.145405] [] ? start_kernel+0x33f/0x33f [ 1.145828] [] ? 
gs_change+0xb/0xb [ 1.146313] ---[ end trace 2703540871c8fab8 ]--- Conflicts: mm/slub.c Signed-off-by: Pekka Enberg --- mm/slab.h | 3 --- mm/slab_common.c | 8 -------- mm/slub.c | 19 +++++++++++++++++-- 3 files changed, 17 insertions(+), 13 deletions(-) (limited to 'mm/slab.h') diff --git a/mm/slab.h b/mm/slab.h index 67aeaa2d39c..7deeb449a30 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -38,13 +38,10 @@ extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); -extern int sysfs_slab_add(struct kmem_cache *s); #else static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { return NULL; } -static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } - #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index 651a3c60847..9c217255ac4 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -155,14 +155,6 @@ out_locked: return NULL; } - if (s->refcount == 1) { - err = sysfs_slab_add(s); - if (err) - printk(KERN_WARNING "kmem_cache_create(%s) failed to" - " create sysfs entry. Error %d\n", - name, err); - } - return s; } EXPORT_SYMBOL(kmem_cache_create); diff --git a/mm/slub.c b/mm/slub.c index 0b122d8ec21..dafd465f7a3 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -202,10 +202,12 @@ struct track { enum track_item { TRACK_ALLOC, TRACK_FREE }; #ifdef CONFIG_SYSFS +static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); static void sysfs_slab_remove(struct kmem_cache *); #else +static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } static inline void sysfs_slab_remove(struct kmem_cache *s) { } @@ -3943,7 +3945,20 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) { - return kmem_cache_open(s, flags); + int err; + + err = kmem_cache_open(s, flags); + if (err) + return err; + + mutex_unlock(&slab_mutex); + err = sysfs_slab_add(s); + mutex_lock(&slab_mutex); + + if (err) + kmem_cache_close(s); + + return err; } #ifdef CONFIG_SMP @@ -5233,7 +5248,7 @@ static char *create_unique_id(struct kmem_cache *s) return name; } -int sysfs_slab_add(struct kmem_cache *s) +static int sysfs_slab_add(struct kmem_cache *s) { int err; const char *name; -- cgit v1.2.3-70-g09d2
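
For readers skimming the series, the following is a simplified, self-contained user-space sketch of the call flow these patches converge on; it is not kernel code and not part of any commit above. The idea it illustrates: kmem_cache_create()/kmem_cache_destroy() in mm/slab_common.c own allocation and freeing of the struct kmem_cache (in the kernel, from the boot cache named "kmem_cache" via kmem_cache_zalloc()), while each allocator supplies only __kmem_cache_create(s, flags) and __kmem_cache_shutdown(s). slab_mutex locking, the slab_caches list, aliasing, sysfs and RCU handling are deliberately omitted, and everything except those four entry-point names is a stand-in.

/*
 * Illustrative sketch only: mirrors the post-series split between common
 * code (allocate/free the kmem_cache structure) and allocator-specific
 * code (__kmem_cache_create / __kmem_cache_shutdown).  Uses calloc/free
 * where the kernel uses kmem_cache_zalloc(kmem_cache, ...)/kmem_cache_free.
 */
#include <stdio.h>
#include <stdlib.h>

struct kmem_cache {
	const char *name;
	size_t object_size;	/* size requested by the caller */
	size_t size;		/* size after allocator adjustments */
	size_t align;
	unsigned long flags;
	void (*ctor)(void *);
	int refcount;
};

/* Stand-in for the statically allocated boot cache named "kmem_cache". */
static struct kmem_cache kmem_cache_boot = { .name = "kmem_cache" };
static struct kmem_cache *kmem_cache = &kmem_cache_boot;

/* Allocator-specific setup: sizing only, never allocates *s itself. */
static int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
	if (!s->size)
		return -1;	/* stands in for -EINVAL / -E2BIG */
	s->flags = flags;
	/* round object size up to the alignment (assumes power-of-two align) */
	s->size = (s->size + s->align - 1) & ~(s->align - 1);
	return 0;
}

/* Allocator-specific teardown; nonzero would mean objects are still live. */
static int __kmem_cache_shutdown(struct kmem_cache *s)
{
	(void)s;
	return 0;
}

/* Common code: owns the kmem_cache structure (the kernel also kstrdup()s
 * the name and links the cache onto slab_caches under slab_mutex). */
static struct kmem_cache *kmem_cache_create(const char *name, size_t size,
					    size_t align, unsigned long flags,
					    void (*ctor)(void *))
{
	struct kmem_cache *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->name = name;
	s->object_size = s->size = size;
	s->align = align ? align : sizeof(void *);
	s->ctor = ctor;
	s->refcount = 1;
	if (__kmem_cache_create(s, flags)) {
		free(s);
		return NULL;
	}
	return s;
}

static void kmem_cache_destroy(struct kmem_cache *s)
{
	if (--s->refcount)
		return;
	if (__kmem_cache_shutdown(s)) {
		fprintf(stderr, "cache %s still has objects\n", s->name);
		return;
	}
	free(s);	/* kernel: kmem_cache_free(kmem_cache, s) */
}

int main(void)
{
	struct kmem_cache *c = kmem_cache_create("demo", 60, 8, 0, NULL);

	if (c) {
		printf("'%s' created via '%s': object_size=%zu size=%zu\n",
		       c->name, kmem_cache->name, c->object_size, c->size);
		kmem_cache_destroy(c);
	}
	return 0;
}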