From d4d84fef6d0366b585b7de13527a0faeca84d9ce Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Thu, 2 Jun 2011 10:19:41 -0400
Subject: slub: always align cpu_slab to honor cmpxchg_double requirement

On an architecture without CMPXCHG_LOCAL but with DEBUG_VM enabled,
the VM_BUG_ON() in __pcpu_double_call_return_bool() will cause an
early panic during boot unless we always align cpu_slab properly.

In principle we could remove the alignment-testing VM_BUG_ON() for
architectures that don't have CMPXCHG_LOCAL, but leaving it in means
that new code will tend not to break x86 even if it is introduced on
another platform, and it's low cost to require alignment.

Acked-by: David Rientjes
Acked-by: Christoph Lameter
Signed-off-by: Chris Metcalf
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'mm')

diff --git a/mm/slub.c b/mm/slub.c
index 7be0223531b..35f351f2619 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
-	 * Must align to double word boundary for the double cmpxchg instructions
-	 * to work.
+	 * Must align to double word boundary for the double cmpxchg
+	 * instructions to work; see __pcpu_double_call_return_bool().
 	 */
-	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
-	/* Regular alignment is sufficient */
-	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#endif
+	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+				     2 * sizeof(void *));
 
 	if (!s->cpu_slab)
 		return 0;
--
cgit v1.2.3-70-g09d2


From a947eb95ea03199da7408a64baa97fbb613e9b84 Mon Sep 17 00:00:00 2001
From: Suleiman Souhlal
Date: Thu, 2 Jun 2011 00:16:42 -0700
Subject: SLAB: Record actual last user of freed objects.

Currently, when using CONFIG_DEBUG_SLAB, we put in kfree() or
kmem_cache_free() as the last user of free objects, which is not very
useful, so change it to the caller of those functions instead.

Acked-by: David Rientjes
Acked-by: Christoph Lameter
Signed-off-by: Suleiman Souhlal
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index bcfa4987c8a..d96e223de77 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3604,13 +3604,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released. Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+    void *caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
-	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+	objp = cache_free_debugcheck(cachep, objp, caller);
 
 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
 
@@ -3801,7 +3802,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_locks_freed(objp, obj_size(cachep));
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, obj_size(cachep));
-	__cache_free(cachep, objp);
+	__cache_free(cachep, objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@ void kfree(const void *objp)
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
 	debug_check_no_obj_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp);
+	__cache_free(c, (void *)objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
--
cgit v1.2.3-70-g09d2
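
The alignment requirement behind the first patch can be seen outside the
kernel. Below is a minimal userspace sketch, not kernel code: the type
name freelist_tid and the demo as a whole are invented for illustration,
with the __atomic_compare_exchange builtin standing in for the kernel's
this_cpu_cmpxchg_double(). A double-word cmpxchg updates two adjacent
pointer-sized words as a single unit, and on x86-64 the underlying
cmpxchg16b instruction faults unless its operand is aligned to
2 * sizeof(void *) -- the alignment the patch now always passes to
__alloc_percpu().

/*
 * Hypothetical demo, assuming x86-64 and GCC or Clang.
 * Build: cc -O2 -mcx16 align_demo.c -latomic
 */
#include <stdio.h>

/* Two pointer-sized words updated as one unit, mirroring the
 * freelist/tid pair in SLUB's struct kmem_cache_cpu; the aligned
 * attribute mirrors the 2 * sizeof(void *) the patch enforces. */
struct freelist_tid {
	void *freelist;
	unsigned long tid;
} __attribute__((aligned(2 * sizeof(void *))));

int main(void)
{
	struct freelist_tid slot = { .freelist = NULL, .tid = 1 };
	struct freelist_tid expected = slot;
	struct freelist_tid desired = { .freelist = &slot, .tid = 2 };

	/* The 16-byte CAS is inlined as cmpxchg16b or routed through
	 * libatomic, depending on compiler version; either way the
	 * hardware instruction requires a 16-byte-aligned operand. */
	int ok = __atomic_compare_exchange(&slot, &expected, &desired, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	printf("double-word swap %s, tid now %lu\n",
	       ok ? "succeeded" : "failed", slot.tid);
	return 0;
}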
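
The second patch applies a common debugging pattern: each exported entry
point captures its own return address and threads it down to the shared
internal helper, so the recorded "last user" names the real caller rather
than kfree() itself. Here is a self-contained sketch of the pattern, with
invented names (my_kfree, cache_free_inner, last_user) standing in for
kfree(), __cache_free() and the slab debug bookkeeping:

#include <stdio.h>

static void *last_user;	/* stand-in for the slab "last user" record */

/* Shared internal helper, like __cache_free() after the patch: it
 * records whatever caller address it is handed. */
static void cache_free_inner(void *objp, void *caller)
{
	last_user = caller;
	(void)objp;		/* real freeing would happen here */
}

/* Exported wrapper, like kfree(): __builtin_return_address(0) is the
 * address my_kfree() returns to, i.e. its true caller. Taken inside
 * cache_free_inner() instead, it would name my_kfree() itself -- the
 * uninformative record the patch gets rid of. */
void my_kfree(void *objp)
{
	cache_free_inner(objp, __builtin_return_address(0));
}

int main(void)
{
	int x = 42;
	my_kfree(&x);
	printf("last user recorded at %p\n", last_user);
	return 0;
}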