author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-14 15:14:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-14 15:14:29 -0700
commit     54be8200198ddfc6cb396720460c19881fac2d5a (patch)
tree       58ccab6e0cfb35b30e7e16804f15fe9c94628f12 /mm/slub.c
parent     41d9884c44237cd66e2bdbc412028b29196b344c (diff)
parent     c25f195e828f847735c7626b5693ddc3b853d245 (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab update from Pekka Enberg:
 "Highlights:

  - Fix for boot-time problems on some architectures due to
    init_lock_keys() not respecting kmalloc_caches boundaries
    (Christoph Lameter)

  - CONFIG_SLUB_CPU_PARTIAL requested by RT folks (Joonsoo Kim)

  - Fix for excessive slab freelist draining (Wanpeng Li)

  - SLUB and SLOB cleanups and fixes (various people)"

I ended up editing the branch, and this avoids two commits at the end
that were immediately reverted, and I instead just applied the oneliner
fix in between myself.

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: Check for page NULL before doing the node_match check
  mm/slab: Give s_next and s_stop slab-specific names
  slob: Check for NULL pointer before calling ctor()
  slub: Make cpu partial slab support configurable
  slab: add kmalloc() to kernel API documentation
  slab: fix init_lock_keys
  slob: use DIV_ROUND_UP where possible
  slub: do not put a slab to cpu partial list when cpu_partial is 0
  mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo
  mm/slub: Drop unnecessary nr_partials
  mm/slab: Fix /proc/slabinfo unwriteable for slab
  mm/slab: Sharing s_next and s_stop between slab and slub
  mm/slab: Fix drain freelist excessively
  slob: Rework #ifdeffery in slab.h
  mm, slab: moved kmem_cache_alloc_node comment to correct place
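The core of the SLUB change in this pull is a single helper, kmem_cache_has_cpu_partial(), which folds to a constant false when CONFIG_SLUB_CPU_PARTIAL is disabled, so every per-cpu partial path can stay behind an ordinary if () and be dropped by the compiler. A minimal stand-alone sketch of that pattern is shown below; the struct kmem_cache, SLAB_DEBUG_FLAGS value and main() demo here are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types used in mm/slub.c. */
struct kmem_cache { unsigned long flags; };
#define SLAB_DEBUG_FLAGS 0x1UL

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return s->flags & SLAB_DEBUG_FLAGS;
}

/* Mirrors the helper added by the patch: constant false when the
 * per-cpu partial feature is compiled out. */
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

int main(void)
{
	struct kmem_cache cache = { .flags = 0 };

	/* Without -DCONFIG_SLUB_CPU_PARTIAL this branch is dead code
	 * and the compiler can discard it entirely. */
	if (kmem_cache_has_cpu_partial(&cache))
		puts("would use the per-cpu partial list");
	else
		puts("per-cpu partial handling compiled out");
	return 0;
}

Building the sketch with and without -DCONFIG_SLUB_CPU_PARTIAL flips the output, which is the same mechanism the diff below relies on in get_partial_node(), __slab_free() and kmem_cache_open().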
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  38
1 file changed, 27 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 57707f01bcf..3b482c86300 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -123,6 +123,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#endif
}
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ return !kmem_cache_debug(s);
+#else
+ return false;
+#endif
+}
+
/*
* Issues still to be resolved:
*
@@ -1573,7 +1582,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
put_cpu_partial(s, page, 0);
stat(s, CPU_PARTIAL_NODE);
}
- if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+ if (!kmem_cache_has_cpu_partial(s)
+ || available > s->cpu_partial / 2)
break;
}
@@ -1884,6 +1894,7 @@ redo:
static void unfreeze_partials(struct kmem_cache *s,
struct kmem_cache_cpu *c)
{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
@@ -1938,6 +1949,7 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_slab(s, page);
stat(s, FREE_SLAB);
}
+#endif
}
/*
@@ -1951,10 +1963,14 @@ static void unfreeze_partials(struct kmem_cache *s,
*/
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct page *oldpage;
int pages;
int pobjects;
+ if (!s->cpu_partial)
+ return;
+
do {
pages = 0;
pobjects = 0;
@@ -1987,6 +2003,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->next = oldpage;
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
}
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2358,7 +2375,7 @@ redo:
object = c->freelist;
page = c->page;
- if (unlikely(!object || !node_match(page, node)))
+ if (unlikely(!object || !page || !node_match(page, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
@@ -2495,7 +2512,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
new.inuse--;
if ((!new.inuse || !prior) && !was_frozen) {
- if (!kmem_cache_debug(s) && !prior)
+ if (kmem_cache_has_cpu_partial(s) && !prior)
/*
* Slab was on no list before and will be partially empty
@@ -2550,8 +2567,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Objects left in the slab. If it was not on the partial list before
* then add it.
*/
- if (kmem_cache_debug(s) && unlikely(!prior)) {
- remove_full(s, page);
+ if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+ if (kmem_cache_debug(s))
+ remove_full(s, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -3059,7 +3077,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
* per node list when we run out of per cpu objects. We only fetch 50%
* to keep some capacity around for frees.
*/
- if (kmem_cache_debug(s))
+ if (!kmem_cache_has_cpu_partial(s))
s->cpu_partial = 0;
else if (s->size >= PAGE_SIZE)
s->cpu_partial = 2;
@@ -4456,7 +4474,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
err = strict_strtoul(buf, 10, &objects);
if (err)
return err;
- if (objects && kmem_cache_debug(s))
+ if (objects && !kmem_cache_has_cpu_partial(s))
return -EINVAL;
s->cpu_partial = objects;
@@ -5269,7 +5287,6 @@ __initcall(slab_sysfs_init);
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
- unsigned long nr_partials = 0;
unsigned long nr_slabs = 0;
unsigned long nr_objs = 0;
unsigned long nr_free = 0;
@@ -5281,9 +5298,8 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
if (!n)
continue;
- nr_partials += n->nr_partial;
- nr_slabs += atomic_long_read(&n->nr_slabs);
- nr_objs += atomic_long_read(&n->total_objects);
+ nr_slabs += node_nr_slabs(n);
+ nr_objs += node_nr_objs(n);
nr_free += count_partial(n, count_free);
}
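For reference, the node_nr_slabs()/node_nr_objs() helpers that the last hunk switches to appear to be thin wrappers over the same per-node atomic counters the removed lines read directly. The sketch below illustrates that reading; atomic_long_t and atomic_long_read() are reduced to plain-C stand-ins here, and the helper bodies are an approximation of the CONFIG_SLUB_DEBUG variants in mm/slub.c of this era, not a copy of them.

#include <stdio.h>

/* Stand-in for the kernel's atomic_long_t, for illustration only. */
typedef struct { long counter; } atomic_long_t;

static inline long atomic_long_read(const atomic_long_t *v)
{
	return v->counter;
}

/* Only the two counters the hunk above is concerned with. */
struct kmem_cache_node {
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
};

/* Roughly what the helpers wrap: the counters the old code read inline. */
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->total_objects);
}

int main(void)
{
	struct kmem_cache_node n = { { 4 }, { 128 } };

	printf("slabs=%lu objs=%lu\n", node_nr_slabs(&n), node_nr_objs(&n));
	return 0;
}

Using the helpers instead of open-coded atomic_long_read() calls is what lets the hunk drop the now-unused nr_partials accumulator while keeping get_slabinfo()'s output unchanged.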