Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 14 +++++++-------
1 files changed, 7 insertions, 7 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 93de30db95f..94d2a33a866 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -64,7 +64,7 @@
  * we must stay away from it for a while since we may cause a bouncing
  * cacheline if we try to acquire the lock. So go onto the next slab.
  * If all pages are busy then we may allocate a new slab instead of reusing
- * a partial slab. A new slab has noone operating on it and thus there is
+ * a partial slab. A new slab has no one operating on it and thus there is
  * no danger of cacheline contention.
  *
  * Interrupts are disabled during allocation and deallocation in order to
@@ -849,11 +849,11 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 		local_irq_save(flags);
 		kmemcheck_slab_free(s, x, s->objsize);
 		debug_check_no_locks_freed(x, s->objsize);
-		if (!(s->flags & SLAB_DEBUG_OBJECTS))
-			debug_check_no_obj_freed(x, s->objsize);
 		local_irq_restore(flags);
 	}
 #endif
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(x, s->objsize);
 }
 
 /*
@@ -1604,7 +1604,7 @@ static inline void note_cmpxchg_failure(const char *n,
 
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#if defined(CONFIG_CMPXCHG_LOCAL) && defined(CONFIG_PREEMPT)
+#ifdef CONFIG_CMPXCHG_LOCAL
 	int cpu;
 
 	for_each_possible_cpu(cpu)
@@ -1929,7 +1929,7 @@ redo:
 	else {
 #ifdef CONFIG_CMPXCHG_LOCAL
 		/*
-		 * The cmpxchg will only match if there was no additonal
+		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
 		 * The cmpxchg does the following atomically (without lock semantics!)
@@ -3547,7 +3547,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
 	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
-	/* Honor the call site pointer we recieved. */
+	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
 	return ret;
@@ -3577,7 +3577,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
 	ret = slab_alloc(s, gfpflags, node, caller);
 
-	/* Honor the call site pointer we recieved. */
+	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
 	return ret;
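
Note: the comment touched by the hunk at line 1929 describes SLUB's lockless fastpath, where a single cmpxchg on the per-cpu freelist plus a transaction id (tid) only succeeds if no other allocation or free ran in between and we are still on the same processor. The following is a minimal userspace sketch of that idea, not the kernel implementation: it packs a 32-bit list-head index and a 32-bit tid into one 64-bit word so a plain compare-and-swap can check both at once. All names here (slot_list, try_push, pack) are made up for the example.

/*
 * Toy illustration of a "cmpxchg matches only if nothing else happened"
 * free-list push.  Not kernel code; assumes C11 atomics.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NIL 0xffffffffu

struct slot_list {
	_Atomic uint64_t word;	/* high 32 bits: head index, low 32 bits: tid */
};

static uint64_t pack(uint32_t head, uint32_t tid)
{
	return ((uint64_t)head << 32) | tid;
}

/*
 * Push @slot as the new head.  The compare-and-swap fails (caller retries or
 * falls back to a slow path) if either the head or the tid changed after we
 * sampled them, i.e. if some other operation slipped in between.
 */
static bool try_push(struct slot_list *l, uint32_t slot, uint32_t *next)
{
	uint64_t old = atomic_load(&l->word);

	*next = (uint32_t)(old >> 32);	/* freed slot will point at the old head */

	uint64_t new = pack(slot, (uint32_t)old + 1);	/* bump the tid */
	return atomic_compare_exchange_strong(&l->word, &old, new);
}

int main(void)
{
	struct slot_list list = { .word = pack(NIL, 0) };
	uint32_t next;

	printf("push 7: %s\n", try_push(&list, 7, &next) ? "ok" : "retry");
	printf("push 3: %s\n", try_push(&list, 3, &next) ? "ok" : "retry");
	printf("3's next is %u\n", next);	/* prints 7 */
	return 0;
}

In the real allocator the same effect is achieved with a double-word cmpxchg on the freelist pointer and tid together; the packed-word form above is only a convenient way to show the check in portable C.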