Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 126
1 file changed, 31 insertions, 95 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 3e8afcc07a7..ae7b9f1ad39 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -169,16 +169,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
  */
 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 
-/*
- * Set of flags that will prevent slab merging
- */
-#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB)
-
-#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA | SLAB_NOTRACK)
-
 #define OO_SHIFT	16
 #define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
@@ -1176,7 +1166,7 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -1208,7 +1198,7 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 			struct page *page) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
 			struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -1699,7 +1689,12 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 		struct kmem_cache_cpu *c)
 {
 	void *object;
-	int searchnode = (node == NUMA_NO_NODE) ? numa_mem_id() : node;
+	int searchnode = node;
+
+	if (node == NUMA_NO_NODE)
+		searchnode = numa_mem_id();
+	else if (!node_present_pages(node))
+		searchnode = node_to_mem_node(node);
 
 	object = get_partial_node(s, get_node(s, searchnode), c, flags);
 	if (object || node != NUMA_NO_NODE)
@@ -2280,11 +2275,18 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 redo:
 
 	if (unlikely(!node_match(page, node))) {
-		stat(s, ALLOC_NODE_MISMATCH);
-		deactivate_slab(s, page, c->freelist);
-		c->page = NULL;
-		c->freelist = NULL;
-		goto new_slab;
+		int searchnode = node;
+
+		if (node != NUMA_NO_NODE && !node_present_pages(node))
+			searchnode = node_to_mem_node(node);
+
+		if (unlikely(!node_match(page, searchnode))) {
+			stat(s, ALLOC_NODE_MISMATCH);
+			deactivate_slab(s, page, c->freelist);
+			c->page = NULL;
+			c->freelist = NULL;
+			goto new_slab;
+		}
 	}
 
 	/*
@@ -2707,12 +2709,6 @@ static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
 static int slub_min_objects;
 
 /*
- * Merge control. If this is set then no merging of slab caches will occur.
- * (Could be removed. This was introduced to pacify the merge skeptics.)
- */
-static int slub_nomerge;
-
-/*
  * Calculate the order of allocation given an slab object size.
  *
  * The order of allocation has significant impact on performance and other
@@ -3240,14 +3236,6 @@ static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
-static int __init setup_slub_nomerge(char *str)
-{
-	slub_nomerge = 1;
-	return 1;
-}
-
-__setup("slub_nomerge", setup_slub_nomerge);
-
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
@@ -3625,69 +3613,6 @@ void __init kmem_cache_init_late(void)
 {
 }
 
-/*
- * Find a mergeable slab cache
- */
-static int slab_unmergeable(struct kmem_cache *s)
-{
-	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
-		return 1;
-
-	if (!is_root_cache(s))
-		return 1;
-
-	if (s->ctor)
-		return 1;
-
-	/*
-	 * We may have set a slab to be unmergeable during bootstrap.
-	 */
-	if (s->refcount < 0)
-		return 1;
-
-	return 0;
-}
-
-static struct kmem_cache *find_mergeable(size_t size, size_t align,
-		unsigned long flags, const char *name, void (*ctor)(void *))
-{
-	struct kmem_cache *s;
-
-	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
-		return NULL;
-
-	if (ctor)
-		return NULL;
-
-	size = ALIGN(size, sizeof(void *));
-	align = calculate_alignment(flags, align, size);
-	size = ALIGN(size, align);
-	flags = kmem_cache_flags(size, flags, name, NULL);
-
-	list_for_each_entry(s, &slab_caches, list) {
-		if (slab_unmergeable(s))
-			continue;
-
-		if (size > s->size)
-			continue;
-
-		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
-			continue;
-		/*
-		 * Check if alignment is compatible.
-		 * Courtesy of Adrian Drzewiecki
-		 */
-		if ((s->size & ~(align - 1)) != s->size)
-			continue;
-
-		if (s->size - size >= sizeof(void *))
-			continue;
-
-		return s;
-	}
-	return NULL;
-}
-
 struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
 		   unsigned long flags, void (*ctor)(void *))
@@ -4604,6 +4529,14 @@ static ssize_t trace_show(struct kmem_cache *s, char *buf)
 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
 							size_t length)
 {
+	/*
+	 * Tracing a merged cache is going to give confusing results
+	 * as well as cause other issues like converting a mergeable
+	 * cache into an umergeable one.
+	 */
+	if (s->refcount > 1)
+		return -EINVAL;
+
 	s->flags &= ~SLAB_TRACE;
 	if (buf[0] == '1') {
 		s->flags &= ~__CMPXCHG_DOUBLE;
@@ -4721,6 +4654,9 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
 							size_t length)
 {
+	if (s->refcount > 1)
+		return -EINVAL;
+
 	s->flags &= ~SLAB_FAILSLAB;
 	if (buf[0] == '1')
 		s->flags |= SLAB_FAILSLAB;