From d9b7f22623b5fa9cc189581dcdfb2ac605933bf4 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 3 Aug 2012 22:51:37 +0400 Subject: slub: use free_page instead of put_page for freeing kmalloc allocation When freeing objects, the slub allocator will most of the time free empty pages by calling __free_pages(). But high-order kmalloc allocations will be disposed of by means of put_page() instead. It makes no sense to call put_page() on kernel pages that are provided by the object allocators, so we shouldn't be doing this ourselves. Aside from the consistency change, we don't change the flow too much. put_page() would call its dtor function, which is __free_pages(). We also already do all of the Compound page tests ourselves, and the Mlock test we lose doesn't really matter. Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes CC: Pekka Enberg Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 8f78e257703..c83fe96f5e4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3477,7 +3477,7 @@ void kfree(const void *x) if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); kmemleak_free(x); - put_page(page); + __free_pages(page, compound_order(page)); return; } slab_free(page->slab, page, object, _RET_IP_); -- cgit v1.2.3-70-g09d2 From 19c7ff9ecd89441096dab6a56f926f7df8ba850a Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 30 May 2012 12:54:46 -0500 Subject: slub: Take node lock during object free checks Only applies to scenarios where debugging is on: Validation of slabs can currently occur while debugging information is updated from the fast paths of the allocator. This results in various races where we get false reports about slab metadata not being in order. This patch makes the fast paths take the node lock so that serialization with slab validation will occur. Causes additional slowdown in debug scenarios.
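To illustrate the new contract (a minimal sketch, assuming only what the diff below shows; the wrapper function here is hypothetical and not the actual __slab_free()): free_debug_processing() now returns the kmem_cache_node with n->list_lock held and the interrupt state saved into *flags, and the caller keeps that lock until the object has actually been freed.

/* Hypothetical caller-side sketch of the new convention. */
static void slab_free_debug_sketch(struct kmem_cache *s, struct page *page,
				   void *x, unsigned long addr)
{
	struct kmem_cache_node *n;
	unsigned long flags;

	n = free_debug_processing(s, page, x, addr, &flags);
	if (!n)
		return;	/* checks failed; the lock was dropped on the fail path */

	/* ... perform the actual free while slab validation is locked out ... */

	spin_unlock_irqrestore(&n->list_lock, flags);
}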
Reported-by: Waiman Long Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index c83fe96f5e4..e131084e87a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1069,13 +1069,13 @@ bad: return 0; } -static noinline int free_debug_processing(struct kmem_cache *s, - struct page *page, void *object, unsigned long addr) +static noinline struct kmem_cache_node *free_debug_processing( + struct kmem_cache *s, struct page *page, void *object, + unsigned long addr, unsigned long *flags) { - unsigned long flags; - int rc = 0; + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); - local_irq_save(flags); + spin_lock_irqsave(&n->list_lock, *flags); slab_lock(page); if (!check_slab(s, page)) @@ -1113,15 +1113,19 @@ static noinline int free_debug_processing(struct kmem_cache *s, set_track(s, object, TRACK_FREE, addr); trace(s, page, object, 0); init_object(s, object, SLUB_RED_INACTIVE); - rc = 1; out: slab_unlock(page); - local_irq_restore(flags); - return rc; + /* + * Keep node_lock to preserve integrity + * until the object is actually freed + */ + return n; fail: + slab_unlock(page); + spin_unlock_irqrestore(&n->list_lock, *flags); slab_fix(s, "Object at 0x%p not freed", object); - goto out; + return NULL; } static int __init setup_slub_debug(char *str) @@ -1214,8 +1218,9 @@ static inline void setup_object_debug(struct kmem_cache *s, static inline int alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) { return 0; } -static inline int free_debug_processing(struct kmem_cache *s, - struct page *page, void *object, unsigned long addr) { return 0; } +static inline struct kmem_cache_node *free_debug_processing( + struct kmem_cache *s, struct page *page, void *object, + unsigned long addr, unsigned long *flags) { return NULL; } static inline int slab_pad_check(struct kmem_cache *s, struct page *page) { return 1; } @@ -2452,7 +2457,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page, stat(s, FREE_SLOWPATH); - if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) + if (kmem_cache_debug(s) && + !(n = free_debug_processing(s, page, x, addr, &flags))) return; do { -- cgit v1.2.3-70-g09d2 From e24fc410f58cc7851188a6e996dc6ce5c4259eb4 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Sat, 23 Jun 2012 03:22:38 +0900 Subject: slub: reduce failure of this_cpu_cmpxchg in put_cpu_partial() after unfreezing In the current implementation we don't touch oldpage after unfreezing, so it remains non-NULL. When we then call this_cpu_cmpxchg() with this stale oldpage, the this_cpu_cmpxchg() will usually fail. We can set oldpage to NULL after unfreezing, because unfreeze_partials() ensures that all the cpu partial slabs have been removed from the cpu partial list. After that, we can expect this_cpu_cmpxchg() to usually succeed.
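The effect is easiest to see against a simplified outline of put_cpu_partial() (a sketch only: the pages/pobjects accounting is omitted and the function body below is illustrative, not the real mm/slub.c code).

/*
 * Without the new "oldpage = NULL" assignment, the cmpxchg below would
 * compare the per-cpu partial pointer (NULL after unfreeze_partials())
 * against the stale non-NULL oldpage, fail, and force an extra retry.
 */
static void put_cpu_partial_sketch(struct kmem_cache *s, struct page *page,
				   int drain)
{
	struct page *oldpage;

	do {
		oldpage = this_cpu_read(s->cpu_slab->partial);

		if (oldpage && drain && oldpage->pobjects > s->cpu_partial) {
			unsigned long flags;

			local_irq_save(flags);
			unfreeze_partials(s);	/* empties the per-cpu partial list */
			local_irq_restore(flags);
			oldpage = NULL;		/* the line added by this patch */
		}

		page->next = oldpage;
	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
}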
Acked-by: Christoph Lameter Signed-off-by: Joonsoo Kim Signed-off-by: Pekka Enberg --- mm/slub.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index e131084e87a..c67bd0a4a95 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1962,6 +1962,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) local_irq_save(flags); unfreeze_partials(s); local_irq_restore(flags); + oldpage = NULL; pobjects = 0; pages = 0; stat(s, CPU_PARTIAL_DRAIN); -- cgit v1.2.3-70-g09d2 From 79576102afc24fcc6627d7a15691e432d9a2eacb Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:06:14 +0000 Subject: mm/slub: Add debugging to verify correct cache use on kmem_cache_free() Add additional debugging to check that the object is actually from the cache the caller claims. Doing so currently trips up some other debugging code. It takes a lot of effort to infer from that what was actually happening. Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter [ penberg@kernel.org: Use pr_err() ] Signed-off-by: Pekka Enberg --- mm/slub.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index c67bd0a4a95..99059217434 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2614,6 +2614,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x) page = virt_to_head_page(x); + if (kmem_cache_debug(s) && page->slab != s) { + pr_err("kmem_cache_free: Wrong slab cache. %s but object" + " is from %s\n", page->slab->name, s->name); + WARN_ON_ONCE(1); + return; + } + slab_free(s, page, x, _RET_IP_); trace_kmem_cache_free(_RET_IP_, x); -- cgit v1.2.3-70-g09d2 From 208c4358dc4a8f0fe99e49eb8d21a869b01e7d34 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:06:14 +0000 Subject: mm/slub: Use kmem_cache for the kmem_cache structure Do not use kmalloc() but kmem_cache_alloc() for the allocation of the kmem_cache structures in slub. Reviewed-by: Glauber Costa Acked-by: David Rientjes Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 99059217434..c6690898321 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -213,7 +213,7 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) static inline void sysfs_slab_remove(struct kmem_cache *s) { kfree(s->name); - kfree(s); + kmem_cache_free(kmem_cache, s); } #endif @@ -3969,7 +3969,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, if (!n) return NULL; - s = kmalloc(kmem_size, GFP_KERNEL); + s = kmem_cache_alloc(kmem_cache, GFP_KERNEL); if (s) { if (kmem_cache_open(s, n, size, align, flags, ctor)) { @@ -3986,7 +3986,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, list_del(&s->list); kmem_cache_close(s); } - kfree(s); + kmem_cache_free(kmem_cache, s); } kfree(n); return NULL; @@ -5224,7 +5224,7 @@ static void kmem_cache_release(struct kobject *kobj) struct kmem_cache *s = to_slab(kobj); kfree(s->name); - kfree(s); + kmem_cache_free(kmem_cache, s); } static const struct sysfs_ops slab_sysfs_ops = { -- cgit v1.2.3-70-g09d2 From 7c9adf5a5471647f392169ef19d3e81dcfa76045 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:38:33 +0000 Subject: mm/sl[aou]b: Move list_add() to slab_common.c Move the code that appends the new kmem_cache to the list of slab caches into kmem_cache_create() in the shared code.
This is possible now since the acquisition of the mutex was moved into kmem_cache_create(). Acked-by: David Rientjes Reviewed-by: Glauber Costa Reviewed-by: Joonsoo Kim Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 7 +++++-- mm/slab_common.c | 7 +++++++ mm/slob.c | 4 ++++ mm/slub.c | 2 -- 4 files changed, 16 insertions(+), 4 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index 3b4587bb7b1..a6990316849 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1680,6 +1680,7 @@ void __init kmem_cache_init(void) ARCH_KMALLOC_FLAGS|SLAB_PANIC, NULL); + list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); if (INDEX_AC != INDEX_L3) { sizes[INDEX_L3].cs_cachep = __kmem_cache_create(names[INDEX_L3].name, @@ -1687,6 +1688,7 @@ void __init kmem_cache_init(void) ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, NULL); + list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches); } slab_early_init = 0; @@ -1705,6 +1707,7 @@ void __init kmem_cache_init(void) ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, NULL); + list_add(&sizes->cs_cachep->list, &slab_caches); } #ifdef CONFIG_ZONE_DMA sizes->cs_dmacachep = __kmem_cache_create( @@ -1714,6 +1717,7 @@ void __init kmem_cache_init(void) ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC, NULL); + list_add(&sizes->cs_dmacachep->list, &slab_caches); #endif sizes++; names++; @@ -2583,6 +2587,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, } cachep->ctor = ctor; cachep->name = name; + cachep->refcount = 1; if (setup_cpu_cache(cachep, gfp)) { __kmem_cache_destroy(cachep); @@ -2599,8 +2604,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align, slab_set_debugobj_lock_classes(cachep); } - /* cache setup completed, link it into the list */ - list_add(&cachep->list, &slab_caches); return cachep; } diff --git a/mm/slab_common.c b/mm/slab_common.c index fe8dc943c28..5190a7cd02b 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -111,6 +111,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align if (!s) err = -ENOSYS; /* Until __kmem_cache_create returns code */ + /* + * Check if the slab has actually been created and if it was a + * real instatiation. 
Aliases do not belong on the list + */ + if (s && s->refcount == 1) + list_add(&s->list, &slab_caches); + out_locked: mutex_unlock(&slab_mutex); put_online_cpus(); diff --git a/mm/slob.c b/mm/slob.c index 45d4ca79933..5225d28f269 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -540,6 +540,10 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, void kmem_cache_destroy(struct kmem_cache *c) { + mutex_lock(&slab_mutex); + list_del(&c->list); + mutex_unlock(&slab_mutex); + kmemleak_free(c); if (c->flags & SLAB_DESTROY_BY_RCU) rcu_barrier(); diff --git a/mm/slub.c b/mm/slub.c index c6690898321..24aa362edef 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3975,7 +3975,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size, align, flags, ctor)) { int r; - list_add(&s->list, &slab_caches); mutex_unlock(&slab_mutex); r = sysfs_slab_add(s); mutex_lock(&slab_mutex); @@ -3983,7 +3982,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, if (!r) return s; - list_del(&s->list); kmem_cache_close(s); } kmem_cache_free(kmem_cache, s); -- cgit v1.2.3-70-g09d2 From 945cf2b6199be70ff03102b9e642c3bb05d01de9 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:18:33 +0000 Subject: mm/sl[aou]b: Extract a common function for kmem_cache_destroy kmem_cache_destroy does basically the same in all allocators. Extract common code which is easy since we already have common mutex handling. Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 45 +++------------------------------------------ mm/slab.h | 3 +++ mm/slab_common.c | 25 +++++++++++++++++++++++++ mm/slob.c | 15 +++++++-------- mm/slub.c | 36 +++++++++++------------------------- 5 files changed, 49 insertions(+), 75 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index a6990316849..49a74b349e3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2206,7 +2206,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) } } -static void __kmem_cache_destroy(struct kmem_cache *cachep) +void __kmem_cache_destroy(struct kmem_cache *cachep) { int i; struct kmem_list3 *l3; @@ -2763,49 +2763,10 @@ int kmem_cache_shrink(struct kmem_cache *cachep) } EXPORT_SYMBOL(kmem_cache_shrink); -/** - * kmem_cache_destroy - delete a cache - * @cachep: the cache to destroy - * - * Remove a &struct kmem_cache object from the slab cache. - * - * It is expected this function will be called by a module when it is - * unloaded. This will remove the cache completely, and avoid a duplicate - * cache being allocated each time a module is loaded and unloaded, if the - * module doesn't have persistent in-kernel storage across loads and unloads. - * - * The cache must be empty before calling this function. - * - * The caller must guarantee that no one will allocate memory from the cache - * during the kmem_cache_destroy(). - */ -void kmem_cache_destroy(struct kmem_cache *cachep) +int __kmem_cache_shutdown(struct kmem_cache *cachep) { - BUG_ON(!cachep || in_interrupt()); - - /* Find the cache in the chain of caches. 
*/ - get_online_cpus(); - mutex_lock(&slab_mutex); - /* - * the chain is never empty, cache_cache is never destroyed - */ - list_del(&cachep->list); - if (__cache_shrink(cachep)) { - slab_error(cachep, "Can't free all objects"); - list_add(&cachep->list, &slab_caches); - mutex_unlock(&slab_mutex); - put_online_cpus(); - return; - } - - if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) - rcu_barrier(); - - __kmem_cache_destroy(cachep); - mutex_unlock(&slab_mutex); - put_online_cpus(); + return __cache_shrink(cachep); } -EXPORT_SYMBOL(kmem_cache_destroy); /* * Get the memory for a slab management obj. diff --git a/mm/slab.h b/mm/slab.h index db7848caaa2..07a537ed5da 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -30,4 +30,7 @@ extern struct list_head slab_caches; struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +int __kmem_cache_shutdown(struct kmem_cache *); +void __kmem_cache_destroy(struct kmem_cache *); + #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index 5190a7cd02b..a1c4f0b5aae 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -140,6 +140,31 @@ out_locked: } EXPORT_SYMBOL(kmem_cache_create); +void kmem_cache_destroy(struct kmem_cache *s) +{ + get_online_cpus(); + mutex_lock(&slab_mutex); + s->refcount--; + if (!s->refcount) { + list_del(&s->list); + + if (!__kmem_cache_shutdown(s)) { + if (s->flags & SLAB_DESTROY_BY_RCU) + rcu_barrier(); + + __kmem_cache_destroy(s); + } else { + list_add(&s->list, &slab_caches); + printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n", + s->name); + dump_stack(); + } + } + mutex_unlock(&slab_mutex); + put_online_cpus(); +} +EXPORT_SYMBOL(kmem_cache_destroy); + int slab_is_available(void) { return slab_state >= UP; diff --git a/mm/slob.c b/mm/slob.c index 5225d28f269..289be4f4681 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -538,18 +538,11 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, return c; } -void kmem_cache_destroy(struct kmem_cache *c) +void __kmem_cache_destroy(struct kmem_cache *c) { - mutex_lock(&slab_mutex); - list_del(&c->list); - mutex_unlock(&slab_mutex); - kmemleak_free(c); - if (c->flags & SLAB_DESTROY_BY_RCU) - rcu_barrier(); slob_free(c, sizeof(struct kmem_cache)); } -EXPORT_SYMBOL(kmem_cache_destroy); void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) { @@ -617,6 +610,12 @@ unsigned int kmem_cache_size(struct kmem_cache *c) } EXPORT_SYMBOL(kmem_cache_size); +int __kmem_cache_shutdown(struct kmem_cache *c) +{ + /* No way to check for remaining objects */ + return 0; +} + int kmem_cache_shrink(struct kmem_cache *d) { return 0; diff --git a/mm/slub.c b/mm/slub.c index 24aa362edef..724adea3438 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -624,7 +624,7 @@ static void object_err(struct kmem_cache *s, struct page *page, print_trailer(s, page, object); } -static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) +static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...) 
{ va_list args; char buf[100]; @@ -3146,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, sizeof(long), GFP_ATOMIC); if (!map) return; - slab_err(s, page, "%s", text); + slab_err(s, page, text, s->name); slab_lock(page); get_map(s, page, map); @@ -3178,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) discard_slab(s, page); } else { list_slab_objects(s, page, - "Objects remaining on kmem_cache_close()"); + "Objects remaining in %s on kmem_cache_close()"); } } } @@ -3191,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s) int node; flush_all(s); - free_percpu(s->cpu_slab); /* Attempt to free all objects */ for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); @@ -3200,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s) if (n->nr_partial || slabs_node(s, node)) return 1; } + free_percpu(s->cpu_slab); free_kmem_cache_nodes(s); return 0; } -/* - * Close a cache and release the kmem_cache structure - * (must be used for caches created using kmem_cache_create) - */ -void kmem_cache_destroy(struct kmem_cache *s) +int __kmem_cache_shutdown(struct kmem_cache *s) { - mutex_lock(&slab_mutex); - s->refcount--; - if (!s->refcount) { - list_del(&s->list); - mutex_unlock(&slab_mutex); - if (kmem_cache_close(s)) { - printk(KERN_ERR "SLUB %s: %s called for cache that " - "still has objects.\n", s->name, __func__); - dump_stack(); - } - if (s->flags & SLAB_DESTROY_BY_RCU) - rcu_barrier(); - sysfs_slab_remove(s); - } else - mutex_unlock(&slab_mutex); + return kmem_cache_close(s); +} + +void __kmem_cache_destroy(struct kmem_cache *s) +{ + sysfs_slab_remove(s); } -EXPORT_SYMBOL(kmem_cache_destroy); /******************************************************************** * Kmalloc subsystem -- cgit v1.2.3-70-g09d2 From 9b030cb865f137e1574596983face2a07e41e8b2 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:20:33 +0000 Subject: mm/sl[aou]b: Use "kmem_cache" name for slab cache with kmem_cache struct Make all allocators use the "kmem_cache" slabname for the "kmem_cache" structure. 
Reviewed-by: Glauber Costa Reviewed-by: Joonsoo Kim Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 72 +++++++++++++++++++++++++++++--------------------------- mm/slab.h | 6 +++++ mm/slab_common.c | 1 + mm/slob.c | 8 +++++++ mm/slub.c | 2 -- 5 files changed, 52 insertions(+), 37 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index 49a74b349e3..ef94799a1aa 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -578,9 +578,9 @@ static struct arraycache_init initarray_generic = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ -static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES]; -static struct kmem_cache cache_cache = { - .nodelists = cache_cache_nodelists, +static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES]; +static struct kmem_cache kmem_cache_boot = { + .nodelists = kmem_cache_nodelists, .batchcount = 1, .limit = BOOT_CPUCACHE_ENTRIES, .shared = 1, @@ -1594,15 +1594,17 @@ void __init kmem_cache_init(void) int order; int node; + kmem_cache = &kmem_cache_boot; + if (num_possible_nodes() == 1) use_alien_caches = 0; for (i = 0; i < NUM_INIT_LISTS; i++) { kmem_list3_init(&initkmem_list3[i]); if (i < MAX_NUMNODES) - cache_cache.nodelists[i] = NULL; + kmem_cache->nodelists[i] = NULL; } - set_up_list3s(&cache_cache, CACHE_CACHE); + set_up_list3s(kmem_cache, CACHE_CACHE); /* * Fragmentation resistance on low memory - only use bigger @@ -1614,9 +1616,9 @@ void __init kmem_cache_init(void) /* Bootstrap is tricky, because several objects are allocated * from caches that do not exist yet: - * 1) initialize the cache_cache cache: it contains the struct - * kmem_cache structures of all caches, except cache_cache itself: - * cache_cache is statically allocated. + * 1) initialize the kmem_cache cache: it contains the struct + * kmem_cache structures of all caches, except kmem_cache itself: + * kmem_cache is statically allocated. * Initially an __init data area is used for the head array and the * kmem_list3 structures, it's replaced with a kmalloc allocated * array at the end of the bootstrap. @@ -1625,43 +1627,43 @@ void __init kmem_cache_init(void) * An __init data area is used for the head array. * 3) Create the remaining kmalloc caches, with minimally sized * head arrays. - * 4) Replace the __init data head arrays for cache_cache and the first + * 4) Replace the __init data head arrays for kmem_cache and the first * kmalloc cache with kmalloc allocated arrays. - * 5) Replace the __init data for kmem_list3 for cache_cache and + * 5) Replace the __init data for kmem_list3 for kmem_cache and * the other cache's with kmalloc allocated memory. * 6) Resize the head arrays of the kmalloc caches to their final sizes. 
*/ node = numa_mem_id(); - /* 1) create the cache_cache */ + /* 1) create the kmem_cache */ INIT_LIST_HEAD(&slab_caches); - list_add(&cache_cache.list, &slab_caches); - cache_cache.colour_off = cache_line_size(); - cache_cache.array[smp_processor_id()] = &initarray_cache.cache; - cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; + list_add(&kmem_cache->list, &slab_caches); + kmem_cache->colour_off = cache_line_size(); + kmem_cache->array[smp_processor_id()] = &initarray_cache.cache; + kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; /* * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids */ - cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + + kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + nr_node_ids * sizeof(struct kmem_list3 *); - cache_cache.object_size = cache_cache.size; - cache_cache.size = ALIGN(cache_cache.size, + kmem_cache->object_size = kmem_cache->size; + kmem_cache->size = ALIGN(kmem_cache->object_size, cache_line_size()); - cache_cache.reciprocal_buffer_size = - reciprocal_value(cache_cache.size); + kmem_cache->reciprocal_buffer_size = + reciprocal_value(kmem_cache->size); for (order = 0; order < MAX_ORDER; order++) { - cache_estimate(order, cache_cache.size, - cache_line_size(), 0, &left_over, &cache_cache.num); - if (cache_cache.num) + cache_estimate(order, kmem_cache->size, + cache_line_size(), 0, &left_over, &kmem_cache->num); + if (kmem_cache->num) break; } - BUG_ON(!cache_cache.num); - cache_cache.gfporder = order; - cache_cache.colour = left_over / cache_cache.colour_off; - cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + + BUG_ON(!kmem_cache->num); + kmem_cache->gfporder = order; + kmem_cache->colour = left_over / kmem_cache->colour_off; + kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) + sizeof(struct slab), cache_line_size()); /* 2+3) create the kmalloc caches */ @@ -1728,15 +1730,15 @@ void __init kmem_cache_init(void) ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); - memcpy(ptr, cpu_cache_get(&cache_cache), + BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache); + memcpy(ptr, cpu_cache_get(kmem_cache), sizeof(struct arraycache_init)); /* * Do not assume that spinlocks can be initialized via memcpy: */ spin_lock_init(&ptr->lock); - cache_cache.array[smp_processor_id()] = ptr; + kmem_cache->array[smp_processor_id()] = ptr; ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); @@ -1757,7 +1759,7 @@ void __init kmem_cache_init(void) int nid; for_each_online_node(nid) { - init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid); + init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid); init_list(malloc_sizes[INDEX_AC].cs_cachep, &initkmem_list3[SIZE_AC + nid], nid); @@ -2223,7 +2225,7 @@ void __kmem_cache_destroy(struct kmem_cache *cachep) kfree(l3); } } - kmem_cache_free(&cache_cache, cachep); + kmem_cache_free(kmem_cache, cachep); } @@ -2473,7 +2475,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, gfp = GFP_NOWAIT; /* Get cache's description obj. 
*/ - cachep = kmem_cache_zalloc(&cache_cache, gfp); + cachep = kmem_cache_zalloc(kmem_cache, gfp); if (!cachep) return NULL; @@ -2531,7 +2533,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, if (!cachep->num) { printk(KERN_ERR "kmem_cache_create: couldn't create cache %s.\n", name); - kmem_cache_free(&cache_cache, cachep); + kmem_cache_free(kmem_cache, cachep); return NULL; } slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) @@ -3299,7 +3301,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) { - if (cachep == &cache_cache) + if (cachep == kmem_cache) return false; return should_failslab(cachep->object_size, flags, cachep->flags); diff --git a/mm/slab.h b/mm/slab.h index 07a537ed5da..6724aa6f662 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -25,8 +25,14 @@ extern enum slab_state slab_state; /* The slab cache mutex protects the management structures during changes */ extern struct mutex slab_mutex; + +/* The list of all slab caches on the system */ extern struct list_head slab_caches; +/* The slab cache that manages slab cache information */ +extern struct kmem_cache *kmem_cache; + +/* Functions provided by the slab allocators */ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); diff --git a/mm/slab_common.c b/mm/slab_common.c index a1c4f0b5aae..5374150f548 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -22,6 +22,7 @@ enum slab_state slab_state; LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); +struct kmem_cache *kmem_cache; #ifdef CONFIG_DEBUG_VM static int kmem_cache_sanity_check(const char *name, size_t size) diff --git a/mm/slob.c b/mm/slob.c index 289be4f4681..7d272c3dcc0 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -622,8 +622,16 @@ int kmem_cache_shrink(struct kmem_cache *d) } EXPORT_SYMBOL(kmem_cache_shrink); +struct kmem_cache kmem_cache_boot = { + .name = "kmem_cache", + .size = sizeof(struct kmem_cache), + .flags = SLAB_PANIC, + .align = ARCH_KMALLOC_MINALIGN, +}; + void __init kmem_cache_init(void) { + kmem_cache = &kmem_cache_boot; slab_state = UP; } diff --git a/mm/slub.c b/mm/slub.c index 724adea3438..e0d1e047030 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3221,8 +3221,6 @@ void __kmem_cache_destroy(struct kmem_cache *s) struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; EXPORT_SYMBOL(kmalloc_caches); -static struct kmem_cache *kmem_cache; - #ifdef CONFIG_ZONE_DMA static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; #endif -- cgit v1.2.3-70-g09d2 From 8f4c765c22deee766319ae9a1db68325f14816e6 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:18:32 +0000 Subject: mm/sl[aou]b: Move freeing of kmem_cache structure to common code The freeing action is basically the same in all slab allocators. Move to the common kmem_cache_destroy() function. 
Reviewed-by: Glauber Costa Reviewed-by: Joonsoo Kim Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 1 - mm/slab_common.c | 1 + mm/slob.c | 2 -- mm/slub.c | 2 -- 4 files changed, 1 insertion(+), 5 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index ef94799a1aa..8ca6ec6301f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2225,7 +2225,6 @@ void __kmem_cache_destroy(struct kmem_cache *cachep) kfree(l3); } } - kmem_cache_free(kmem_cache, cachep); } diff --git a/mm/slab_common.c b/mm/slab_common.c index 5374150f548..d6deae9108c 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -154,6 +154,7 @@ void kmem_cache_destroy(struct kmem_cache *s) rcu_barrier(); __kmem_cache_destroy(s); + kmem_cache_free(kmem_cache, s); } else { list_add(&s->list, &slab_caches); printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n", diff --git a/mm/slob.c b/mm/slob.c index 7d272c3dcc0..cb4ab967529 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -540,8 +540,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, void __kmem_cache_destroy(struct kmem_cache *c) { - kmemleak_free(c); - slob_free(c, sizeof(struct kmem_cache)); } void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) diff --git a/mm/slub.c b/mm/slub.c index e0d1e047030..6f932f7a821 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -213,7 +213,6 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) static inline void sysfs_slab_remove(struct kmem_cache *s) { kfree(s->name); - kmem_cache_free(kmem_cache, s); } #endif @@ -5206,7 +5205,6 @@ static void kmem_cache_release(struct kobject *kobj) struct kmem_cache *s = to_slab(kobj); kfree(s->name); - kmem_cache_free(kmem_cache, s); } static const struct sysfs_ops slab_sysfs_ops = { -- cgit v1.2.3-70-g09d2 From 12c3667fb780e20360ad0bde32dfb3591ef609ad Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:38:33 +0000 Subject: mm/sl[aou]b: Get rid of __kmem_cache_destroy What is done there can be done in __kmem_cache_shutdown. This affects RCU handling somewhat. On rcu free all slab allocators do not refer to other management structures than the kmem_cache structure. Therefore these other structures can be freed before the rcu deferred free to the page allocator occurs. Reviewed-by: Joonsoo Kim Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 46 +++++++++++++++++++++------------------------- mm/slab.h | 1 - mm/slab_common.c | 1 - mm/slob.c | 4 ---- mm/slub.c | 10 +++++----- 5 files changed, 26 insertions(+), 36 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index 8ca6ec6301f..de961b48a6a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2208,26 +2208,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) } } -void __kmem_cache_destroy(struct kmem_cache *cachep) -{ - int i; - struct kmem_list3 *l3; - - for_each_online_cpu(i) - kfree(cachep->array[i]); - - /* NUMA: free the list3 structures */ - for_each_online_node(i) { - l3 = cachep->nodelists[i]; - if (l3) { - kfree(l3->shared); - free_alien_cache(l3->alien); - kfree(l3); - } - } -} - - /** * calculate_slab_order - calculate size (page order) of slabs * @cachep: pointer to the cache that is being created @@ -2364,9 +2344,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) * Cannot be called within a int, but can be interrupted. * The @ctor is run when new pages are allocated by the cache. 
* - * @name must be valid until the cache is destroyed. This implies that - * the module calling this has to destroy the cache before getting unloaded. - * * The flags are * * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) @@ -2591,7 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, cachep->refcount = 1; if (setup_cpu_cache(cachep, gfp)) { - __kmem_cache_destroy(cachep); + __kmem_cache_shutdown(cachep); return NULL; } @@ -2766,7 +2743,26 @@ EXPORT_SYMBOL(kmem_cache_shrink); int __kmem_cache_shutdown(struct kmem_cache *cachep) { - return __cache_shrink(cachep); + int i; + struct kmem_list3 *l3; + int rc = __cache_shrink(cachep); + + if (rc) + return rc; + + for_each_online_cpu(i) + kfree(cachep->array[i]); + + /* NUMA: free the list3 structures */ + for_each_online_node(i) { + l3 = cachep->nodelists[i]; + if (l3) { + kfree(l3->shared); + free_alien_cache(l3->alien); + kfree(l3); + } + } + return 0; } /* diff --git a/mm/slab.h b/mm/slab.h index 6724aa6f662..c4f9a361bd1 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -37,6 +37,5 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); int __kmem_cache_shutdown(struct kmem_cache *); -void __kmem_cache_destroy(struct kmem_cache *); #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index d6deae9108c..7df814e8fbe 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -153,7 +153,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->flags & SLAB_DESTROY_BY_RCU) rcu_barrier(); - __kmem_cache_destroy(s); kmem_cache_free(kmem_cache, s); } else { list_add(&s->list, &slab_caches); diff --git a/mm/slob.c b/mm/slob.c index cb4ab967529..50f60532270 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -538,10 +538,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, return c; } -void __kmem_cache_destroy(struct kmem_cache *c) -{ -} - void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) { void *b; diff --git a/mm/slub.c b/mm/slub.c index 6f932f7a821..e5e09873f5e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3205,12 +3205,12 @@ static inline int kmem_cache_close(struct kmem_cache *s) int __kmem_cache_shutdown(struct kmem_cache *s) { - return kmem_cache_close(s); -} + int rc = kmem_cache_close(s); -void __kmem_cache_destroy(struct kmem_cache *s) -{ - sysfs_slab_remove(s); + if (!rc) + sysfs_slab_remove(s); + + return rc; } /******************************************************************** -- cgit v1.2.3-70-g09d2 From db265eca77000c5dafc5608975afe8dafb2a02d5 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:18:33 +0000 Subject: mm/sl[aou]b: Move duping of slab name to slab_common.c Duping of the slabname has to be done by each slab. Moving this code to slab_common avoids duplicate implementations. With this patch we have common string handling for all slab allocators. Strings passed to kmem_cache_create() are copied internally. Subsystems can create temporary strings to create slab caches. Slabs allocated in early states of bootstrap will never be freed (and those can never be freed since they are essential to slab allocator operations). During bootstrap we therefore do not have to worry about duping names. 
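As a usage example (a hypothetical caller, not part of this series): since kmem_cache_create() now keeps its own copy of the name via kstrdup(), a subsystem can safely build the cache name in a temporary buffer.

static struct kmem_cache *create_example_cache(int node, size_t size)
{
	char name[32];

	/* The buffer may go out of scope; the core keeps its own copy. */
	snprintf(name, sizeof(name), "example_cache_node%d", node);
	return kmem_cache_create(name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
}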
Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab_common.c | 30 +++++++++++++++++++++--------- mm/slub.c | 21 ++------------------- 2 files changed, 23 insertions(+), 28 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab_common.c b/mm/slab_common.c index 7df814e8fbe..f18c06fd97c 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -100,6 +100,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align { struct kmem_cache *s = NULL; int err = 0; + char *n; get_online_cpus(); mutex_lock(&slab_mutex); @@ -108,16 +109,26 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align goto out_locked; - s = __kmem_cache_create(name, size, align, flags, ctor); - if (!s) - err = -ENOSYS; /* Until __kmem_cache_create returns code */ + n = kstrdup(name, GFP_KERNEL); + if (!n) { + err = -ENOMEM; + goto out_locked; + } + + s = __kmem_cache_create(n, size, align, flags, ctor); + + if (s) { + /* + * Check if the slab has actually been created and if it was a + * real instatiation. Aliases do not belong on the list + */ + if (s->refcount == 1) + list_add(&s->list, &slab_caches); - /* - * Check if the slab has actually been created and if it was a - * real instatiation. Aliases do not belong on the list - */ - if (s && s->refcount == 1) - list_add(&s->list, &slab_caches); + } else { + kfree(n); + err = -ENOSYS; /* Until __kmem_cache_create returns code */ + } out_locked: mutex_unlock(&slab_mutex); @@ -153,6 +164,7 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->flags & SLAB_DESTROY_BY_RCU) rcu_barrier(); + kfree(s->name); kmem_cache_free(kmem_cache, s); } else { list_add(&s->list, &slab_caches); diff --git a/mm/slub.c b/mm/slub.c index e5e09873f5e..91c9a2fe676 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -210,10 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *); static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } -static inline void sysfs_slab_remove(struct kmem_cache *s) -{ - kfree(s->name); -} +static inline void sysfs_slab_remove(struct kmem_cache *s) { } #endif @@ -3929,7 +3926,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *s; - char *n; s = find_mergeable(size, align, flags, name, ctor); if (s) { @@ -3948,13 +3944,9 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, return s; } - n = kstrdup(name, GFP_KERNEL); - if (!n) - return NULL; - s = kmem_cache_alloc(kmem_cache, GFP_KERNEL); if (s) { - if (kmem_cache_open(s, n, + if (kmem_cache_open(s, name, size, align, flags, ctor)) { int r; @@ -3969,7 +3961,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, } kmem_cache_free(kmem_cache, s); } - kfree(n); return NULL; } @@ -5200,13 +5191,6 @@ static ssize_t slab_attr_store(struct kobject *kobj, return err; } -static void kmem_cache_release(struct kobject *kobj) -{ - struct kmem_cache *s = to_slab(kobj); - - kfree(s->name); -} - static const struct sysfs_ops slab_sysfs_ops = { .show = slab_attr_show, .store = slab_attr_store, @@ -5214,7 +5198,6 @@ static const struct sysfs_ops slab_sysfs_ops = { static struct kobj_type slab_ktype = { .sysfs_ops = &slab_sysfs_ops, - .release = kmem_cache_release }; static int uevent_filter(struct kset *kset, struct kobject *kobj) -- cgit v1.2.3-70-g09d2 From cbb79694d592e9a76880f6ef6db8feccaeee1c32 Mon 
Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:18:32 +0000 Subject: mm/sl[aou]b: Do slab aliasing call from common code The slab aliasing logic causes some strange contortions in slub. So add a call to deal with aliases to slab_common.c but disable it for other slab allocators by providing stubs that fail to create aliases. Full general support for aliases will require additional cleanup passes and more standardization of fields in kmem_cache. Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.h | 10 ++++++++++ mm/slab_common.c | 4 ++++ mm/slub.c | 15 +++++++++++---- 3 files changed, 25 insertions(+), 4 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.h b/mm/slab.h index c4f9a361bd1..84c28f451d2 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -36,6 +36,16 @@ extern struct kmem_cache *kmem_cache; struct kmem_cache *__kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +#ifdef CONFIG_SLUB +struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)); +#else +static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)) +{ return NULL; } +#endif + + int __kmem_cache_shutdown(struct kmem_cache *); #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index f18c06fd97c..adc42b01b25 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -115,6 +115,10 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align goto out_locked; } + s = __kmem_cache_alias(name, size, align, flags, ctor); + if (s) + goto out_locked; + s = __kmem_cache_create(n, size, align, flags, ctor); if (s) { diff --git a/mm/slub.c b/mm/slub.c index 91c9a2fe676..64d445e7a27 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3708,7 +3708,7 @@ void __init kmem_cache_init(void) slub_max_order = 0; kmem_size = offsetof(struct kmem_cache, node) + - nr_node_ids * sizeof(struct kmem_cache_node *); + nr_node_ids * sizeof(struct kmem_cache_node *); /* Allocate two kmem_caches from the page allocator */ kmalloc_size = ALIGN(kmem_size, cache_line_size()); @@ -3922,7 +3922,7 @@ static struct kmem_cache *find_mergeable(size_t size, return NULL; } -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, +struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *s; @@ -3939,11 +3939,18 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, if (sysfs_slab_alias(s, name)) { s->refcount--; - return NULL; + s = NULL; } - return s; } + return s; +} + +struct kmem_cache *__kmem_cache_create(const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)) +{ + struct kmem_cache *s; + s = kmem_cache_alloc(kmem_cache, GFP_KERNEL); if (s) { if (kmem_cache_open(s, name, -- cgit v1.2.3-70-g09d2 From 96d17b7be0a9849d381442030886211dbb2a7061 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:18:32 +0000 Subject: mm/sl[aou]b: Move sysfs_slab_add to common Simplify locking by moving the sysfs_slab_add() call after all locks have been dropped. Eases the upcoming move to provide sysfs support for all allocators.
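Schematically, the resulting ordering in kmem_cache_create() is (condensed from the slab_common.c hunk below, not a complete listing):

	get_online_cpus();
	mutex_lock(&slab_mutex);
	/* ... handle aliases, allocate and set up the new cache ... */
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (s->refcount == 1)
		sysfs_slab_add(s);	/* called with no slab locks held */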
Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.h | 3 +++ mm/slab_common.c | 8 ++++++++ mm/slub.c | 15 ++------------- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.h b/mm/slab.h index 84c28f451d2..ec7b94429b9 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -39,10 +39,13 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +extern int sysfs_slab_add(struct kmem_cache *s); #else static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { return NULL; } +static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } + #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index adc42b01b25..4f722084bae 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -152,6 +152,14 @@ out_locked: return NULL; } + if (s->refcount == 1) { + err = sysfs_slab_add(s); + if (err) + printk(KERN_WARNING "kmem_cache_create(%s) failed to" + " create sysfs entry. Error %d\n", + name, err); + } + return s; } EXPORT_SYMBOL(kmem_cache_create); diff --git a/mm/slub.c b/mm/slub.c index 64d445e7a27..8d00fd78df2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -202,12 +202,10 @@ struct track { enum track_item { TRACK_ALLOC, TRACK_FREE }; #ifdef CONFIG_SYSFS -static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); static void sysfs_slab_remove(struct kmem_cache *); #else -static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } static inline void sysfs_slab_remove(struct kmem_cache *s) { } @@ -3955,16 +3953,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, if (s) { if (kmem_cache_open(s, name, size, align, flags, ctor)) { - int r; - - mutex_unlock(&slab_mutex); - r = sysfs_slab_add(s); - mutex_lock(&slab_mutex); - - if (!r) - return s; - - kmem_cache_close(s); + return s; } kmem_cache_free(kmem_cache, s); } @@ -5258,7 +5247,7 @@ static char *create_unique_id(struct kmem_cache *s) return name; } -static int sysfs_slab_add(struct kmem_cache *s) +int sysfs_slab_add(struct kmem_cache *s) { int err; const char *name; -- cgit v1.2.3-70-g09d2 From 278b1bb1313664d4999a7f7d47a8a8d964862d02 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 5 Sep 2012 00:20:34 +0000 Subject: mm/sl[aou]b: Move kmem_cache allocations into common code Shift the allocations to common code. That way the allocation and freeing of the kmem_cache structures is handled by common code. Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 34 ++++++++++++++++------------------ mm/slab.h | 4 ++-- mm/slab_common.c | 18 ++++++++++-------- mm/slob.c | 42 +++++++++++++++++------------------------- mm/slub.c | 24 +++++++----------------- 5 files changed, 52 insertions(+), 70 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index de961b48a6a..abc83334e5f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1676,7 +1676,8 @@ void __init kmem_cache_init(void) * bug. 
*/ - sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name, + sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name, sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, @@ -1684,8 +1685,8 @@ void __init kmem_cache_init(void) list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); if (INDEX_AC != INDEX_L3) { - sizes[INDEX_L3].cs_cachep = - __kmem_cache_create(names[INDEX_L3].name, + sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name, sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, @@ -1704,7 +1705,8 @@ void __init kmem_cache_init(void) * allow tighter packing of the smaller caches. */ if (!sizes->cs_cachep) { - sizes->cs_cachep = __kmem_cache_create(names->name, + sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes->cs_cachep, names->name, sizes->cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, @@ -1712,7 +1714,8 @@ void __init kmem_cache_init(void) list_add(&sizes->cs_cachep->list, &slab_caches); } #ifdef CONFIG_ZONE_DMA - sizes->cs_dmacachep = __kmem_cache_create( + sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + __kmem_cache_create(sizes->cs_dmacachep, names->name_dma, sizes->cs_size, ARCH_KMALLOC_MINALIGN, @@ -2356,13 +2359,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) * cacheline. This can be beneficial if you're counting cycles as closely * as davem. */ -struct kmem_cache * -__kmem_cache_create (const char *name, size_t size, size_t align, +int +__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { size_t left_over, slab_size, ralign; - struct kmem_cache *cachep = NULL; gfp_t gfp; + int err; #if DEBUG #if FORCED_DEBUG @@ -2450,11 +2453,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align, else gfp = GFP_NOWAIT; - /* Get cache's description obj. 
*/ - cachep = kmem_cache_zalloc(kmem_cache, gfp); - if (!cachep) - return NULL; - cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; cachep->object_size = size; cachep->align = align; @@ -2509,8 +2507,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, if (!cachep->num) { printk(KERN_ERR "kmem_cache_create: couldn't create cache %s.\n", name); - kmem_cache_free(kmem_cache, cachep); - return NULL; + return -E2BIG; } slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab), align); @@ -2567,9 +2564,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align, cachep->name = name; cachep->refcount = 1; - if (setup_cpu_cache(cachep, gfp)) { + err = setup_cpu_cache(cachep, gfp); + if (err) { __kmem_cache_shutdown(cachep); - return NULL; + return err; } if (flags & SLAB_DEBUG_OBJECTS) { @@ -2582,7 +2580,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align, slab_set_debugobj_lock_classes(cachep); } - return cachep; + return 0; } #if DEBUG diff --git a/mm/slab.h b/mm/slab.h index ec7b94429b9..077b07a24ef 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -33,8 +33,8 @@ extern struct list_head slab_caches; extern struct kmem_cache *kmem_cache; /* Functions provided by the slab allocators */ -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)); +extern int __kmem_cache_create(struct kmem_cache *, const char *name, + size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, diff --git a/mm/slab_common.c b/mm/slab_common.c index 4f722084bae..f50d2ed4fbf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -119,19 +119,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align if (s) goto out_locked; - s = __kmem_cache_create(n, size, align, flags, ctor); - + s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (s) { - /* - * Check if the slab has actually been created and if it was a - * real instatiation. Aliases do not belong on the list - */ - if (s->refcount == 1) + err = __kmem_cache_create(s, n, size, align, flags, ctor); + if (!err) + list_add(&s->list, &slab_caches); + else { + kfree(n); + kmem_cache_free(kmem_cache, s); + } + } else { kfree(n); - err = -ENOSYS; /* Until __kmem_cache_create returns code */ + err = -ENOMEM; } out_locked: diff --git a/mm/slob.c b/mm/slob.c index 50f60532270..9b0cee1e847 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -508,34 +508,26 @@ size_t ksize(const void *block) } EXPORT_SYMBOL(ksize); -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, +int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { - struct kmem_cache *c; - - c = slob_alloc(sizeof(struct kmem_cache), - GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); - - if (c) { - c->name = name; - c->size = size; - if (flags & SLAB_DESTROY_BY_RCU) { - /* leave room for rcu footer at the end of object */ - c->size += sizeof(struct slob_rcu); - } - c->flags = flags; - c->ctor = ctor; - /* ignore alignment unless it's forced */ - c->align = (flags & SLAB_HWCACHE_ALIGN) ? 
SLOB_ALIGN : 0; - if (c->align < ARCH_SLAB_MINALIGN) - c->align = ARCH_SLAB_MINALIGN; - if (c->align < align) - c->align = align; - - kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL); - c->refcount = 1; + c->name = name; + c->size = size; + if (flags & SLAB_DESTROY_BY_RCU) { + /* leave room for rcu footer at the end of object */ + c->size += sizeof(struct slob_rcu); } - return c; + c->flags = flags; + c->ctor = ctor; + /* ignore alignment unless it's forced */ + c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; + if (c->align < ARCH_SLAB_MINALIGN) + c->align = ARCH_SLAB_MINALIGN; + if (c->align < align) + c->align = align; + + c->refcount = 1; + return 0; } void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) diff --git a/mm/slub.c b/mm/slub.c index 8d00fd78df2..0ad3fffc7d2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3034,7 +3034,6 @@ static int kmem_cache_open(struct kmem_cache *s, size_t align, unsigned long flags, void (*ctor)(void *)) { - memset(s, 0, kmem_size); s->name = name; s->ctor = ctor; s->object_size = size; @@ -3109,7 +3108,7 @@ static int kmem_cache_open(struct kmem_cache *s, goto error; if (alloc_kmem_cache_cpus(s)) - return 1; + return 0; free_kmem_cache_nodes(s); error: @@ -3118,7 +3117,7 @@ error: "order=%u offset=%u flags=%lx\n", s->name, (unsigned long)size, s->size, oo_order(s->oo), s->offset, flags); - return 0; + return -EINVAL; } /* @@ -3260,13 +3259,13 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name, { struct kmem_cache *s; - s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); + s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); /* * This function is called with IRQs disabled during early-boot on * single CPU so there's no need to take slab_mutex here. */ - if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, + if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, flags, NULL)) goto panic; @@ -3944,20 +3943,11 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, return s; } -struct kmem_cache *__kmem_cache_create(const char *name, size_t size, +int __kmem_cache_create(struct kmem_cache *s, + const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { - struct kmem_cache *s; - - s = kmem_cache_alloc(kmem_cache, GFP_KERNEL); - if (s) { - if (kmem_cache_open(s, name, - size, align, flags, ctor)) { - return s; - } - kmem_cache_free(kmem_cache, s); - } - return NULL; + return kmem_cache_open(s, name, size, align, flags, ctor); } #ifdef CONFIG_SMP -- cgit v1.2.3-70-g09d2 From 8a13a4cc80bb25c9eab2e7e56bab724fcfa55fce Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:18:33 +0000 Subject: mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists Do the initial settings of the fields in common code. This will allow us to push more processing into common code later and improve readability. 
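Schematically, the interface change is (signatures condensed from the mm/slab.h and mm/slab_common.c hunks below):

/* Before: each allocator received and recorded every parameter itself. */
int __kmem_cache_create(struct kmem_cache *s, const char *name, size_t size,
			size_t align, unsigned long flags, void (*ctor)(void *));

/* After: kmem_cache_create() fills in s->name, s->size, s->object_size,
 * s->align and s->ctor up front, then calls the allocator-specific hook. */
int __kmem_cache_create(struct kmem_cache *s, unsigned long flags);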
Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 73 +++++++++++++++++++++++++------------------------------- mm/slab.h | 3 +-- mm/slab_common.c | 26 ++++++++++---------- mm/slob.c | 8 +++---- mm/slub.c | 39 ++++++++++++++---------------- 5 files changed, 68 insertions(+), 81 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index abc83334e5f..f1f6d54e129 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1677,20 +1677,20 @@ void __init kmem_cache_init(void) */ sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - __kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name, - sizes[INDEX_AC].cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL); - + sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name; + sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size; + sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size; + sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN; + __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); + if (INDEX_AC != INDEX_L3) { sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - __kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name, - sizes[INDEX_L3].cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL); + sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name; + sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size; + sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size; + sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN; + __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches); } @@ -1706,22 +1706,21 @@ void __init kmem_cache_init(void) */ if (!sizes->cs_cachep) { sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - __kmem_cache_create(sizes->cs_cachep, names->name, - sizes->cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL); + sizes->cs_cachep->name = names->name; + sizes->cs_cachep->size = sizes->cs_size; + sizes->cs_cachep->object_size = sizes->cs_size; + sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN; + __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); list_add(&sizes->cs_cachep->list, &slab_caches); } #ifdef CONFIG_ZONE_DMA sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + sizes->cs_dmacachep->name = names->name_dma; + sizes->cs_dmacachep->size = sizes->cs_size; + sizes->cs_dmacachep->object_size = sizes->cs_size; + sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN; __kmem_cache_create(sizes->cs_dmacachep, - names->name_dma, - sizes->cs_size, - ARCH_KMALLOC_MINALIGN, - ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| - SLAB_PANIC, - NULL); + ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC); list_add(&sizes->cs_dmacachep->list, &slab_caches); #endif sizes++; @@ -2360,12 +2359,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) * as davem. 
*/ int -__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align, - unsigned long flags, void (*ctor)(void *)) +__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) { size_t left_over, slab_size, ralign; gfp_t gfp; int err; + size_t size = cachep->size; #if DEBUG #if FORCED_DEBUG @@ -2437,8 +2436,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s ralign = ARCH_SLAB_MINALIGN; } /* 3) caller mandated alignment */ - if (ralign < align) { - ralign = align; + if (ralign < cachep->align) { + ralign = cachep->align; } /* disable debug if necessary */ if (ralign > __alignof__(unsigned long long)) @@ -2446,7 +2445,7 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s /* * 4) Store it. */ - align = ralign; + cachep->align = ralign; if (slab_is_available()) gfp = GFP_KERNEL; @@ -2454,8 +2453,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s gfp = GFP_NOWAIT; cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; - cachep->object_size = size; - cachep->align = align; #if DEBUG /* @@ -2500,17 +2497,15 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s */ flags |= CFLGS_OFF_SLAB; - size = ALIGN(size, align); + size = ALIGN(size, cachep->align); - left_over = calculate_slab_order(cachep, size, align, flags); + left_over = calculate_slab_order(cachep, size, cachep->align, flags); - if (!cachep->num) { - printk(KERN_ERR - "kmem_cache_create: couldn't create cache %s.\n", name); + if (!cachep->num) return -E2BIG; - } + slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) - + sizeof(struct slab), align); + + sizeof(struct slab), cachep->align); /* * If the slab has been placed off-slab, and we have enough space then @@ -2538,8 +2533,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s cachep->colour_off = cache_line_size(); /* Offset must be a multiple of the alignment. 
*/ - if (cachep->colour_off < align) - cachep->colour_off = align; + if (cachep->colour_off < cachep->align) + cachep->colour_off = cachep->align; cachep->colour = left_over / cachep->colour_off; cachep->slab_size = slab_size; cachep->flags = flags; @@ -2560,8 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s */ BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); } - cachep->ctor = ctor; - cachep->name = name; cachep->refcount = 1; err = setup_cpu_cache(cachep, gfp); diff --git a/mm/slab.h b/mm/slab.h index 077b07a24ef..67aeaa2d39c 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -33,8 +33,7 @@ extern struct list_head slab_caches; extern struct kmem_cache *kmem_cache; /* Functions provided by the slab allocators */ -extern int __kmem_cache_create(struct kmem_cache *, const char *name, - size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); +extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, diff --git a/mm/slab_common.c b/mm/slab_common.c index f50d2ed4fbf..8a85a19d90e 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -100,7 +100,6 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align { struct kmem_cache *s = NULL; int err = 0; - char *n; get_online_cpus(); mutex_lock(&slab_mutex); @@ -109,32 +108,33 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align goto out_locked; - n = kstrdup(name, GFP_KERNEL); - if (!n) { - err = -ENOMEM; - goto out_locked; - } - s = __kmem_cache_alias(name, size, align, flags, ctor); if (s) goto out_locked; s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (s) { - err = __kmem_cache_create(s, n, size, align, flags, ctor); + s->object_size = s->size = size; + s->align = align; + s->ctor = ctor; + s->name = kstrdup(name, GFP_KERNEL); + if (!s->name) { + kmem_cache_free(kmem_cache, s); + err = -ENOMEM; + goto out_locked; + } + + err = __kmem_cache_create(s, flags); if (!err) list_add(&s->list, &slab_caches); else { - kfree(n); + kfree(s->name); kmem_cache_free(kmem_cache, s); } - - } else { - kfree(n); + } else err = -ENOMEM; - } out_locked: mutex_unlock(&slab_mutex); diff --git a/mm/slob.c b/mm/slob.c index 9b0cee1e847..cac05d92f32 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -508,17 +508,15 @@ size_t ksize(const void *block) } EXPORT_SYMBOL(ksize); -int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)) +int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) { - c->name = name; - c->size = size; + size_t align = c->size; + if (flags & SLAB_DESTROY_BY_RCU) { /* leave room for rcu footer at the end of object */ c->size += sizeof(struct slob_rcu); } c->flags = flags; - c->ctor = ctor; /* ignore alignment unless it's forced */ c->align = (flags & SLAB_HWCACHE_ALIGN) ? 
SLOB_ALIGN : 0; if (c->align < ARCH_SLAB_MINALIGN) diff --git a/mm/slub.c b/mm/slub.c index 0ad3fffc7d2..d8ee419d5a1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3029,16 +3029,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) } -static int kmem_cache_open(struct kmem_cache *s, - const char *name, size_t size, - size_t align, unsigned long flags, - void (*ctor)(void *)) +static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) { - s->name = name; - s->ctor = ctor; - s->object_size = size; - s->align = align; - s->flags = kmem_cache_flags(size, flags, name, ctor); + s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); s->reserved = 0; if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) @@ -3115,7 +3108,7 @@ error: if (flags & SLAB_PANIC) panic("Cannot create slab %s size=%lu realsize=%u " "order=%u offset=%u flags=%lx\n", - s->name, (unsigned long)size, s->size, oo_order(s->oo), + s->name, (unsigned long)s->size, s->size, oo_order(s->oo), s->offset, flags); return -EINVAL; } @@ -3261,12 +3254,15 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name, s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + s->name = name; + s->size = s->object_size = size; + s->align = ARCH_KMALLOC_MINALIGN; + /* * This function is called with IRQs disabled during early-boot on * single CPU so there's no need to take slab_mutex here. */ - if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, - flags, NULL)) + if (kmem_cache_open(s, flags)) goto panic; list_add(&s->list, &slab_caches); @@ -3719,9 +3715,10 @@ void __init kmem_cache_init(void) */ kmem_cache_node = (void *)kmem_cache + kmalloc_size; - kmem_cache_open(kmem_cache_node, "kmem_cache_node", - sizeof(struct kmem_cache_node), - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + kmem_cache_node->name = "kmem_cache_node"; + kmem_cache_node->size = kmem_cache_node->object_size = + sizeof(struct kmem_cache_node); + kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC); hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); @@ -3729,8 +3726,10 @@ void __init kmem_cache_init(void) slab_state = PARTIAL; temp_kmem_cache = kmem_cache; - kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + kmem_cache->name = "kmem_cache"; + kmem_cache->size = kmem_cache->object_size = kmem_size; + kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); memcpy(kmem_cache, temp_kmem_cache, kmem_size); @@ -3943,11 +3942,9 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, return s; } -int __kmem_cache_create(struct kmem_cache *s, - const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)) +int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) { - return kmem_cache_open(s, name, size, align, flags, ctor); + return kmem_cache_open(s, flags); } #ifdef CONFIG_SMP -- cgit v1.2.3-70-g09d2 From cce89f4f6911286500cf7be0363f46c9b0a12ce0 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 4 Sep 2012 23:38:33 +0000 Subject: mm/sl[aou]b: Move kmem_cache refcounting to common code Get rid of the refcount stuff in the allocators and do that part of kmem_cache management in the common code. 
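In outline, the flow this series converges on looks like the sketch below: kmem_cache_create() in mm/slab_common.c fills in the caller-visible fields and, with this patch, also owns the reference count, while the allocator-specific __kmem_cache_create() is reduced to internal setup. This is a condensed sketch of that shape only, not the literal kernel code; slab_mutex handling, the cache-alias lookup and the SLAB_PANIC error path are left out.

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
			size_t align, unsigned long flags,
			void (*ctor)(void *))
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);

	if (!s)
		return NULL;

	/* Generic fields are set once, in common code. */
	s->object_size = s->size = size;
	s->align = align;
	s->ctor = ctor;
	s->name = kstrdup(name, GFP_KERNEL);

	if (s->name && !__kmem_cache_create(s, flags)) {
		s->refcount = 1;	/* previously done by each allocator */
		list_add(&s->list, &slab_caches);
		return s;
	}

	kfree(s->name);
	kmem_cache_free(kmem_cache, s);
	return NULL;
}

The point of the split is that every allocator (slab, slob, slub) sees an identical, fully-initialized struct kmem_cache and no longer duplicates the bookkeeping.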
Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 1 - mm/slab_common.c | 5 +++-- mm/slob.c | 1 - mm/slub.c | 1 - 4 files changed, 3 insertions(+), 5 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index f1f6d54e129..11d9af5f9d2 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2555,7 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) */ BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); } - cachep->refcount = 1; err = setup_cpu_cache(cachep, gfp); if (err) { diff --git a/mm/slab_common.c b/mm/slab_common.c index 8a85a19d90e..651a3c60847 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -125,11 +125,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align } err = __kmem_cache_create(s, flags); - if (!err) + if (!err) { + s->refcount = 1; list_add(&s->list, &slab_caches); - else { + } else { kfree(s->name); kmem_cache_free(kmem_cache, s); } diff --git a/mm/slob.c b/mm/slob.c index cac05d92f32..3edfeaac320 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -524,7 +524,6 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) if (c->align < align) c->align = align; - c->refcount = 1; return 0; } diff --git a/mm/slub.c b/mm/slub.c index d8ee419d5a1..0b122d8ec21 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3093,7 +3093,6 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) else s->cpu_partial = 30; - s->refcount = 1; #ifdef CONFIG_NUMA s->remote_node_defrag_ratio = 1000; #endif -- cgit v1.2.3-70-g09d2 From aac3a1664aba429f47c70edfc76ee10fcd808471 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 5 Sep 2012 12:07:44 +0300 Subject: Revert "mm/sl[aou]b: Move sysfs_slab_add to common" This reverts commit 96d17b7be0a9849d381442030886211dbb2a7061 which caused the following errors at boot: [ 1.114885] kobject (ffff88001a802578): tried to init an initialized object, something is seriously wrong. [ 1.114885] Pid: 1, comm: swapper/0 Tainted: G W 3.6.0-rc1+ #6 [ 1.114885] Call Trace: [ 1.114885] [] kobject_init+0x87/0xa0 [ 1.115555] [] kobject_init_and_add+0x2a/0x90 [ 1.115555] [] ? sprintf+0x40/0x50 [ 1.115555] [] sysfs_slab_add+0x80/0x210 [ 1.115555] [] kmem_cache_create+0xa5/0x250 [ 1.115555] [] ? md_init+0x144/0x144 [ 1.115555] [] local_init+0xa4/0x11b [ 1.115555] [] dm_init+0x14/0x45 [ 1.115836] [] do_one_initcall+0x3a/0x160 [ 1.116834] [] kernel_init+0x133/0x1b7 [ 1.117835] [] ? do_early_param+0x86/0x86 [ 1.117835] [] kernel_thread_helper+0x4/0x10 [ 1.118401] [] ? start_kernel+0x33f/0x33f [ 1.119832] [] ? gs_change+0xb/0xb [ 1.120325] ------------[ cut here ]------------ [ 1.120835] WARNING: at fs/sysfs/dir.c:536 sysfs_add_one+0xc1/0xf0() [ 1.121437] sysfs: cannot create duplicate filename '/kernel/slab/:t-0000016' [ 1.121831] Modules linked in: [ 1.122138] Pid: 1, comm: swapper/0 Tainted: G W 3.6.0-rc1+ #6 [ 1.122831] Call Trace: [ 1.123074] [] ? sysfs_add_one+0xc1/0xf0 [ 1.123833] [] warn_slowpath_common+0x7a/0xb0 [ 1.124405] [] warn_slowpath_fmt+0x41/0x50 [ 1.124832] [] sysfs_add_one+0xc1/0xf0 [ 1.125337] [] create_dir+0x73/0xd0 [ 1.125832] [] sysfs_create_dir+0x81/0xe0 [ 1.126363] [] kobject_add_internal+0x9d/0x210 [ 1.126832] [] kobject_init_and_add+0x63/0x90 [ 1.127406] [] sysfs_slab_add+0x80/0x210 [ 1.127832] [] kmem_cache_create+0xa5/0x250 [ 1.128384] [] ? md_init+0x144/0x144 [ 1.128833] [] local_init+0xa4/0x11b [ 1.129831] [] dm_init+0x14/0x45 [ 1.130305] [] do_one_initcall+0x3a/0x160 [ 1.130831] [] kernel_init+0x133/0x1b7 [ 1.131351] [] ? 
do_early_param+0x86/0x86 [ 1.131830] [] kernel_thread_helper+0x4/0x10 [ 1.132392] [] ? start_kernel+0x33f/0x33f [ 1.132830] [] ? gs_change+0xb/0xb [ 1.133315] ---[ end trace 2703540871c8fab7 ]--- [ 1.133830] ------------[ cut here ]------------ [ 1.134274] WARNING: at lib/kobject.c:196 kobject_add_internal+0x1f5/0x210() [ 1.134829] kobject_add_internal failed for :t-0000016 with -EEXIST, don't try to register things with the same name in the same directory. [ 1.135829] Modules linked in: [ 1.136135] Pid: 1, comm: swapper/0 Tainted: G W 3.6.0-rc1+ #6 [ 1.136828] Call Trace: [ 1.137071] [] ? kobject_add_internal+0x1f5/0x210 [ 1.137830] [] warn_slowpath_common+0x7a/0xb0 [ 1.138402] [] warn_slowpath_fmt+0x41/0x50 [ 1.138830] [] ? release_sysfs_dirent+0x73/0xf0 [ 1.139419] [] kobject_add_internal+0x1f5/0x210 [ 1.139830] [] kobject_init_and_add+0x63/0x90 [ 1.140429] [] sysfs_slab_add+0x80/0x210 [ 1.140830] [] kmem_cache_create+0xa5/0x250 [ 1.141829] [] ? md_init+0x144/0x144 [ 1.142307] [] local_init+0xa4/0x11b [ 1.142829] [] dm_init+0x14/0x45 [ 1.143307] [] do_one_initcall+0x3a/0x160 [ 1.143829] [] kernel_init+0x133/0x1b7 [ 1.144352] [] ? do_early_param+0x86/0x86 [ 1.144829] [] kernel_thread_helper+0x4/0x10 [ 1.145405] [] ? start_kernel+0x33f/0x33f [ 1.145828] [] ? gs_change+0xb/0xb [ 1.146313] ---[ end trace 2703540871c8fab8 ]--- Conflicts: mm/slub.c Signed-off-by: Pekka Enberg --- mm/slab.h | 3 --- mm/slab_common.c | 8 -------- mm/slub.c | 19 +++++++++++++++++-- 3 files changed, 17 insertions(+), 13 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slab.h b/mm/slab.h index 67aeaa2d39c..7deeb449a30 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -38,13 +38,10 @@ extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); -extern int sysfs_slab_add(struct kmem_cache *s); #else static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { return NULL; } -static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } - #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index 651a3c60847..9c217255ac4 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -155,14 +155,6 @@ out_locked: return NULL; } - if (s->refcount == 1) { - err = sysfs_slab_add(s); - if (err) - printk(KERN_WARNING "kmem_cache_create(%s) failed to" - " create sysfs entry. 
Error %d\n", - name, err); - } - return s; } EXPORT_SYMBOL(kmem_cache_create); diff --git a/mm/slub.c b/mm/slub.c index 0b122d8ec21..dafd465f7a3 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -202,10 +202,12 @@ struct track { enum track_item { TRACK_ALLOC, TRACK_FREE }; #ifdef CONFIG_SYSFS +static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); static void sysfs_slab_remove(struct kmem_cache *); #else +static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } static inline void sysfs_slab_remove(struct kmem_cache *s) { } @@ -3943,7 +3945,20 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) { - return kmem_cache_open(s, flags); + int err; + + err = kmem_cache_open(s, flags); + if (err) + return err; + + mutex_unlock(&slab_mutex); + err = sysfs_slab_add(s); + mutex_lock(&slab_mutex); + + if (err) + kmem_cache_close(s); + + return err; } #ifdef CONFIG_SMP @@ -5233,7 +5248,7 @@ static char *create_unique_id(struct kmem_cache *s) return name; } -int sysfs_slab_add(struct kmem_cache *s) +static int sysfs_slab_add(struct kmem_cache *s) { int err; const char *name; -- cgit v1.2.3-70-g09d2 From 9df53b154ac712c87db1170057aa6df05eb7bdbd Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 8 Sep 2012 18:27:10 +0000 Subject: slub: Zero initial memory segment for kmem_cache and kmem_cache_node Tony Luck reported the following problem on IA-64: Worked fine yesterday on next-20120905, crashes today. First sign of trouble was an unaligned access, then a NULL dereference. SL*B related bits of my config: CONFIG_SLUB_DEBUG=y # CONFIG_SLAB is not set CONFIG_SLUB=y CONFIG_SLABINFO=y # CONFIG_SLUB_DEBUG_ON is not set # CONFIG_SLUB_STATS is not set And he console log. 
PID hash table entries: 4096 (order: 1, 32768 bytes) Dentry cache hash table entries: 262144 (order: 7, 2097152 bytes) Inode-cache hash table entries: 131072 (order: 6, 1048576 bytes) Memory: 2047920k/2086064k available (13992k code, 38144k reserved, 6012k data, 880k init) kernel unaligned access to 0xca2ffc55fb373e95, ip=0xa0000001001be550 swapper[0]: error during unaligned kernel access -1 [1] Modules linked in: Pid: 0, CPU 0, comm: swapper psr : 00001010084a2018 ifs : 800000000000060f ip : [] Not tainted (3.6.0-rc4-zx1-smp-next-20120906) ip is at new_slab+0x90/0x680 unat: 0000000000000000 pfs : 000000000000060f rsc : 0000000000000003 rnat: 9666960159966a59 bsps: a0000001001441c0 pr : 9666960159965a59 ldrs: 0000000000000000 ccv : 0000000000000000 fpsr: 0009804c8a70433f csd : 0000000000000000 ssd : 0000000000000000 b0 : a0000001001be500 b6 : a00000010112cb20 b7 : a0000001011660a0 f6 : 0fff7f0f0f0f0e54f0000 f7 : 0ffe8c5c1000000000000 f8 : 1000d8000000000000000 f9 : 100068800000000000000 f10 : 10005f0f0f0f0e54f0000 f11 : 1003e0000000000000078 r1 : a00000010155eef0 r2 : 0000000000000000 r3 : fffffffffffc1638 r8 : e0000040600081b8 r9 : ca2ffc55fb373e95 r10 : 0000000000000000 r11 : e000004040001646 r12 : a000000101287e20 r13 : a000000101280000 r14 : 0000000000004000 r15 : 0000000000000078 r16 : ca2ffc55fb373e75 r17 : e000004040040000 r18 : fffffffffffc1646 r19 : e000004040001646 r20 : fffffffffffc15f8 r21 : 000000000000004d r22 : a00000010132fa68 r23 : 00000000000000ed r24 : 0000000000000000 r25 : 0000000000000000 r26 : 0000000000000001 r27 : a0000001012b8500 r28 : a00000010135f4a0 r29 : 0000000000000000 r30 : 0000000000000000 r31 : 0000000000000001 Unable to handle kernel NULL pointer dereference (address 0000000000000018) swapper[0]: Oops 11003706212352 [2] Modules linked in: Pid: 0, CPU 0, comm: swapper psr : 0000121008022018 ifs : 800000000000cc18 ip : [] Not tainted (3.6.0-rc4-zx1-smp-next-20120906) ip is at __copy_user+0x891/0x960 unat: 0000000000000000 pfs : 0000000000000813 rsc : 0000000000000003 rnat: 0000000000000000 bsps: 0000000000000000 pr : 9666960159961765 ldrs: 0000000000000000 ccv : 0000000000000000 fpsr: 0009804c0270033f csd : 0000000000000000 ssd : 0000000000000000 b0 : a00000010004b550 b6 : a00000010004b740 b7 : a00000010000c750 f6 : 000000000000000000000 f7 : 1003e9e3779b97f4a7c16 f8 : 1003e0a00000010001550 f9 : 100068800000000000000 f10 : 10005f0f0f0f0e54f0000 f11 : 1003e0000000000000078 r1 : a00000010155eef0 r2 : a0000001012870b0 r3 : a0000001012870b8 r8 : 0000000000000298 r9 : 0000000000000013 r10 : 0000000000000000 r11 : 9666960159961a65 r12 : a000000101287010 r13 : a000000101280000 r14 : a000000101287068 r15 : a000000101287080 r16 : 0000000000000298 r17 : 0000000000000010 r18 : 0000000000000018 r19 : a000000101287310 r20 : 0000000000000290 r21 : 0000000000000000 r22 : 0000000000000000 r23 : a000000101386f58 r24 : 0000000000000000 r25 : 000000007fffffff r26 : a000000101287078 r27 : a0000001013c69b0 r28 : 0000000000000000 r29 : 0000000000000014 r30 : 0000000000000000 r31 : 0000000000000813 Sedat Dilek and Hugh Dickins reported similar problems as well. Earlier patches in the common set moved the zeroing of the kmem_cache structure into common code. See "Move allocation of kmem_cache into common code". The allocation for the two special structures is still done from SLUB specific code but no zeroing is done since the cache creation functions used to zero. This now needs to be updated so that the structures are zeroed during allocation in kmem_cache_init(). 
Otherwise random pointer values may be followed. Reported-by: Tony Luck Reported-by: Sedat Dilek Tested-by: Sedat Dilek Reported-by: Hugh Dickins Tested-by: Sedat Dilek Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index dafd465f7a3..2258ed82880 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3707,7 +3707,7 @@ void __init kmem_cache_init(void) /* Allocate two kmem_caches from the page allocator */ kmalloc_size = ALIGN(kmem_size, cache_line_size()); order = get_order(2 * kmalloc_size); - kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); + kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order); /* * Must first have the slab cache available for the allocations of the -- cgit v1.2.3-70-g09d2 From 645df230cacc48f4463037016e9dbd3633183fe8 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Tue, 18 Sep 2012 15:54:12 -0400 Subject: mm, sl[au]b: Taint kernel when we detect a corrupted slab It doesn't seem worth adding a new taint flag for this, so just re-use the one from 'bad page' Acked-by: Christoph Lameter # SLUB Acked-by: David Rientjes Signed-off-by: Dave Jones Signed-off-by: Pekka Enberg --- mm/slab.c | 1 + mm/slub.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'mm/slub.c') diff --git a/mm/slab.c b/mm/slab.c index cd5a9265030..5c6abb831e6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -811,6 +811,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", function, cachep->name, msg); dump_stack(); + add_taint(TAINT_BAD_PAGE); } /* diff --git a/mm/slub.c b/mm/slub.c index c67bd0a4a95..a6d043e1326 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -568,6 +568,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...) printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); printk(KERN_ERR "----------------------------------------" "-------------------------------------\n\n"); + + add_taint(TAINT_BAD_PAGE); } static void slab_fix(struct kmem_cache *s, char *fmt, ...) -- cgit v1.2.3-70-g09d2 From 2b847c3cb4f8565911bd8ce59b8428e635c90594 Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Sat, 8 Sep 2012 17:47:58 -0300 Subject: mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB This patch does not fix anything, and its only goal is to enable us to obtain some common code between SLAB and SLUB. Neither behavior nor produced code is affected. Cc: Christoph Lameter Signed-off-by: Ezequiel Garcia Signed-off-by: Pekka Enberg --- mm/slub.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index a6d043e1326..f074f756405 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2313,7 +2313,7 @@ new_slab: * * Otherwise we can simply pick the next object from the lockless free list. 
*/ -static __always_inline void *slab_alloc(struct kmem_cache *s, +static __always_inline void *slab_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr) { void **object; @@ -2383,9 +2383,15 @@ redo: return object; } +static __always_inline void *slab_alloc(struct kmem_cache *s, + gfp_t gfpflags, unsigned long addr) +{ + return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); +} + void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) { - void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); + void *ret = slab_alloc(s, gfpflags, _RET_IP_); trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); @@ -2396,7 +2402,7 @@ EXPORT_SYMBOL(kmem_cache_alloc); #ifdef CONFIG_TRACING void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) { - void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); + void *ret = slab_alloc(s, gfpflags, _RET_IP_); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); return ret; } @@ -2414,7 +2420,7 @@ EXPORT_SYMBOL(kmalloc_order_trace); #ifdef CONFIG_NUMA void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) { - void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); + void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); trace_kmem_cache_alloc_node(_RET_IP_, ret, s->object_size, s->size, gfpflags, node); @@ -2428,7 +2434,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) { - void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); + void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); trace_kmalloc_node(_RET_IP_, ret, size, s->size, gfpflags, node); @@ -3366,7 +3372,7 @@ void *__kmalloc(size_t size, gfp_t flags) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); + ret = slab_alloc(s, flags, _RET_IP_); trace_kmalloc(_RET_IP_, ret, size, s->size, flags); @@ -3409,7 +3415,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, flags, node, _RET_IP_); + ret = slab_alloc_node(s, flags, node, _RET_IP_); trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); @@ -4037,7 +4043,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); + ret = slab_alloc(s, gfpflags, caller); /* Honor the call site pointer we received. */ trace_kmalloc(caller, ret, size, s->size, gfpflags); @@ -4067,7 +4073,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, gfpflags, node, caller); + ret = slab_alloc_node(s, gfpflags, node, caller); /* Honor the call site pointer we received. 
*/ trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); -- cgit v1.2.3-70-g09d2 From 788e1aadadd0d5a9cbffce10c34840b4072bc733 Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Fri, 28 Sep 2012 16:34:05 +0800 Subject: slub: init_kmem_cache_cpus() and put_cpu_partial() can be static Acked-by: Glauber Costa Acked-by: Christoph Lameter Acked-by: David Rientjes Signed-off-by: Fengguang Wu Signed-off-by: Pekka Enberg --- mm/slub.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index f074f756405..944b4edaeb1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1716,7 +1716,7 @@ static inline void note_cmpxchg_failure(const char *n, stat(s, CMPXCHG_DOUBLE_CPU_FAIL); } -void init_kmem_cache_cpus(struct kmem_cache *s) +static void init_kmem_cache_cpus(struct kmem_cache *s) { int cpu; @@ -1941,7 +1941,7 @@ static void unfreeze_partials(struct kmem_cache *s) * If we did not find a slot then simply move all the partials to the * per node partial list. */ -int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) +static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) { struct page *oldpage; int pages; -- cgit v1.2.3-70-g09d2
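A final note on the pattern in that last patch: a helper that is called only from within the same .c file should be declared static, so the symbol stays local to its translation unit and tools such as sparse stop warning that it was not declared. The example below is hypothetical (plain userspace C with invented names), purely to illustrate the rule in isolation.

#include <stdio.h>

/*
 * File-local helper: 'static' keeps the symbol out of the global
 * namespace, so it cannot clash with a same-named function in another
 * translation unit and needs no declaration in a shared header.
 */
static int clamp_order(int order)
{
	return order < 0 ? 0 : order;
}

int main(void)
{
	printf("%d\n", clamp_order(-3));	/* prints 0 */
	return 0;
}

In mm/slub.c the same reasoning applies to init_kmem_cache_cpus() and put_cpu_partial(): both are used only inside that file, so adding static is a visibility-only change with no functional effect.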