Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  364
1 file changed, 138 insertions(+), 226 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a467b308c68..eb2b2ea3013 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -237,11 +237,10 @@ struct arraycache_init {
/*
* Need this for bootstrapping a per node allocator.
*/
-#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
+#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define CACHE_CACHE 0
-#define SIZE_AC MAX_NUMNODES
-#define SIZE_NODE (2 * MAX_NUMNODES)
+#define SIZE_NODE (MAX_NUMNODES)
static int drain_freelist(struct kmem_cache *cache,
struct kmem_cache_node *n, int tofree);
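With the cpu caches moving to the percpu allocator, no bootstrap kmalloc cache is needed for struct arraycache_init anymore, so the __initdata table shrinks to two banks of MAX_NUMNODES entries. A sketch of the resulting indexing, using only the macros kept above:

	/* Bank 0: boot-time nodes for kmem_cache itself */
	struct kmem_cache_node *bn = &init_kmem_cache_node[CACHE_CACHE + node];

	/* Bank 1: boot-time nodes for the kmalloc cache that backs
	 * struct kmem_cache_node (SIZE_AC, the arraycache bank, is gone) */
	bn = &init_kmem_cache_node[SIZE_NODE + node];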
@@ -253,7 +252,6 @@ static void cache_reap(struct work_struct *unused);
static int slab_early_init = 1;
-#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -458,9 +456,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
-static struct arraycache_init initarray_generic =
- { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
-
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
.batchcount = 1,
@@ -476,7 +471,7 @@ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
- return cachep->array[smp_processor_id()];
+ return this_cpu_ptr(cachep->cpu_cache);
}
static size_t calculate_freelist_size(int nr_objs, size_t align)
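cpu_cache_get() now resolves the slot through the percpu allocator instead of indexing an nr_cpu_ids-sized pointer array in struct kmem_cache. A minimal sketch of the two accessors this patch leans on (slab calls the first with interrupts off, so the CPU cannot change underneath it):

	struct array_cache *ac;

	ac = this_cpu_ptr(cachep->cpu_cache);		/* the running CPU's slot */
	ac = per_cpu_ptr(cachep->cpu_cache, cpu);	/* a specific CPU's slot */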
@@ -785,8 +780,8 @@ static inline void *ac_get_obj(struct kmem_cache *cachep,
return objp;
}
-static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
- void *objp)
+static noinline void *__ac_put_obj(struct kmem_cache *cachep,
+ struct array_cache *ac, void *objp)
{
if (unlikely(pfmemalloc_active)) {
/* Some pfmemalloc slabs exist, check if this is one */
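Tagging the pfmemalloc branch noinline keeps the common-case ac_put_obj() small enough to stay inlined in every free path; the cold pfmemalloc walk only costs a call when it actually happens. The general idiom, sketched with hypothetical names:

	struct foo { bool rare; };

	static noinline void handle_rare_case(struct foo *f) { /* cold path */ }

	static inline void hot_path(struct foo *f)
	{
		if (unlikely(f->rare))		/* predicted not taken */
			handle_rare_case(f);
	}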
@@ -984,46 +979,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
}
}
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+ int node, int page_node)
{
- int nodeid = page_to_nid(virt_to_page(objp));
struct kmem_cache_node *n;
struct alien_cache *alien = NULL;
struct array_cache *ac;
- int node;
LIST_HEAD(list);
- node = numa_mem_id();
-
- /*
- * Make sure we are not freeing a object from another node to the array
- * cache on this cpu.
- */
- if (likely(nodeid == node))
- return 0;
-
n = get_node(cachep, node);
STATS_INC_NODEFREES(cachep);
- if (n->alien && n->alien[nodeid]) {
- alien = n->alien[nodeid];
+ if (n->alien && n->alien[page_node]) {
+ alien = n->alien[page_node];
ac = &alien->ac;
spin_lock(&alien->lock);
if (unlikely(ac->avail == ac->limit)) {
STATS_INC_ACOVERFLOW(cachep);
- __drain_alien_cache(cachep, ac, nodeid, &list);
+ __drain_alien_cache(cachep, ac, page_node, &list);
}
ac_put_obj(cachep, ac, objp);
spin_unlock(&alien->lock);
slabs_destroy(cachep, &list);
} else {
- n = get_node(cachep, nodeid);
+ n = get_node(cachep, page_node);
spin_lock(&n->list_lock);
- free_block(cachep, &objp, 1, nodeid, &list);
+ free_block(cachep, &objp, 1, page_node, &list);
spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
return 1;
}
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+ int page_node = page_to_nid(virt_to_page(objp));
+ int node = numa_mem_id();
+ /*
+	 * Make sure we are not freeing an object from another node to the array
+ * cache on this cpu.
+ */
+ if (likely(node == page_node))
+ return 0;
+
+ return __cache_free_alien(cachep, objp, node, page_node);
+}
#endif
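Only the cheap same-node test stays inline now; the cross-node free, which takes locks and may drain an alien cache, moves out of line into __cache_free_alien(). The inline fast path boils down to two node lookups, as sketched:

	/* home node of the object = node of the page backing it */
	int page_node = page_to_nid(virt_to_page(objp));
	/* node the current CPU allocates from (nearest node with memory,
	 * which matters on memoryless-node configurations) */
	int node = numa_mem_id();

	if (likely(node == page_node))
		return 0;	/* free locally, no alien machinery */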
/*
@@ -1092,24 +1091,25 @@ static void cpuup_canceled(long cpu)
struct alien_cache **alien;
LIST_HEAD(list);
- /* cpu is dead; no one can alloc from it. */
- nc = cachep->array[cpu];
- cachep->array[cpu] = NULL;
n = get_node(cachep, node);
-
if (!n)
- goto free_array_cache;
+ continue;
spin_lock_irq(&n->list_lock);
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
- if (nc)
+
+ /* cpu is dead; no one can alloc from it. */
+ nc = per_cpu_ptr(cachep->cpu_cache, cpu);
+ if (nc) {
free_block(cachep, nc->entry, nc->avail, node, &list);
+ nc->avail = 0;
+ }
if (!cpumask_empty(mask)) {
spin_unlock_irq(&n->list_lock);
- goto free_array_cache;
+ goto free_slab;
}
shared = n->shared;
@@ -1129,9 +1129,9 @@ static void cpuup_canceled(long cpu)
drain_alien_cache(cachep, alien);
free_alien_cache(alien);
}
-free_array_cache:
+
+free_slab:
slabs_destroy(cachep, &list);
- kfree(nc);
}
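Because the percpu area is allocated for every possible CPU up front and survives hotplug, the dead CPU's array_cache can no longer be kfree()d; it is flushed in place and reused if the CPU comes back online. The pattern, annotated:

	struct array_cache *nc;

	/* the percpu slot outlives the offlined CPU ... */
	nc = per_cpu_ptr(cachep->cpu_cache, cpu);
	free_block(cachep, nc->entry, nc->avail, node, &list);
	nc->avail = 0;	/* ... so drain it and keep the storage */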
/*
* In the previous loop, all the objects were freed to
@@ -1168,32 +1168,23 @@ static int cpuup_prepare(long cpu)
* array caches
*/
list_for_each_entry(cachep, &slab_caches, list) {
- struct array_cache *nc;
struct array_cache *shared = NULL;
struct alien_cache **alien = NULL;
- nc = alloc_arraycache(node, cachep->limit,
- cachep->batchcount, GFP_KERNEL);
- if (!nc)
- goto bad;
if (cachep->shared) {
shared = alloc_arraycache(node,
cachep->shared * cachep->batchcount,
0xbaadf00d, GFP_KERNEL);
- if (!shared) {
- kfree(nc);
+ if (!shared)
goto bad;
- }
}
if (use_alien_caches) {
alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
if (!alien) {
kfree(shared);
- kfree(nc);
goto bad;
}
}
- cachep->array[cpu] = nc;
n = get_node(cachep, node);
BUG_ON(!n);
@@ -1385,15 +1376,6 @@ static void __init set_up_node(struct kmem_cache *cachep, int index)
}
/*
- * The memory after the last cpu cache pointer is used for the
- * the node pointer.
- */
-static void setup_node_pointer(struct kmem_cache *cachep)
-{
- cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
-}
-
-/*
* Initialisation. Called after the page allocator have been initialised and
* before smp_init().
*/
@@ -1404,7 +1386,6 @@ void __init kmem_cache_init(void)
BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot;
- setup_node_pointer(kmem_cache);
if (num_possible_nodes() == 1)
use_alien_caches = 0;
@@ -1412,8 +1393,6 @@ void __init kmem_cache_init(void)
for (i = 0; i < NUM_INIT_LISTS; i++)
kmem_cache_node_init(&init_kmem_cache_node[i]);
- set_up_node(kmem_cache, CACHE_CACHE);
-
/*
* Fragmentation resistance on low memory - only use bigger
* page orders on machines with more than 32MB of memory if
@@ -1448,49 +1427,22 @@ void __init kmem_cache_init(void)
* struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
create_boot_cache(kmem_cache, "kmem_cache",
- offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+ offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *),
SLAB_HWCACHE_ALIGN);
list_add(&kmem_cache->list, &slab_caches);
-
- /* 2+3) create the kmalloc caches */
+ slab_state = PARTIAL;
/*
- * Initialize the caches that provide memory for the array cache and the
- * kmem_cache_node structures first. Without this, further allocations will
- * bug.
+ * Initialize the caches that provide memory for the kmem_cache_node
+ * structures first. Without this, further allocations will bug.
*/
-
- kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
-
- if (INDEX_AC != INDEX_NODE)
- kmalloc_caches[INDEX_NODE] =
- create_kmalloc_cache("kmalloc-node",
+ kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
+ slab_state = PARTIAL_NODE;
slab_early_init = 0;
- /* 4) Replace the bootstrap head arrays */
- {
- struct array_cache *ptr;
-
- ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
-
- memcpy(ptr, cpu_cache_get(kmem_cache),
- sizeof(struct arraycache_init));
-
- kmem_cache->array[smp_processor_id()] = ptr;
-
- ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
-
- BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
- != &initarray_generic.cache);
- memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
- sizeof(struct arraycache_init));
-
- kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
- }
/* 5) Replace the bootstrap kmem_cache_node */
{
int nid;
@@ -1498,13 +1450,8 @@ void __init kmem_cache_init(void)
for_each_online_node(nid) {
init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
- init_list(kmalloc_caches[INDEX_AC],
- &init_kmem_cache_node[SIZE_AC + nid], nid);
-
- if (INDEX_AC != INDEX_NODE) {
- init_list(kmalloc_caches[INDEX_NODE],
+ init_list(kmalloc_caches[INDEX_NODE],
&init_kmem_cache_node[SIZE_NODE + nid], nid);
- }
}
}
@@ -2037,56 +1984,53 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
return left_over;
}
+static struct array_cache __percpu *alloc_kmem_cache_cpus(
+ struct kmem_cache *cachep, int entries, int batchcount)
+{
+ int cpu;
+ size_t size;
+ struct array_cache __percpu *cpu_cache;
+
+ size = sizeof(void *) * entries + sizeof(struct array_cache);
+ cpu_cache = __alloc_percpu(size, sizeof(void *));
+
+ if (!cpu_cache)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ init_arraycache(per_cpu_ptr(cpu_cache, cpu),
+ entries, batchcount);
+ }
+
+ return cpu_cache;
+}
+
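Each CPU's chunk is laid out as the array_cache header immediately followed by its entry[] pointer array, and the percpu allocator replicates that chunk once per possible CPU. A sketch of the sizing done by alloc_kmem_cache_cpus() above:

	/* header plus 'entries' object pointer slots, pointer-aligned */
	size_t size = sizeof(struct array_cache) + entries * sizeof(void *);
	struct array_cache __percpu *cc;

	cc = __alloc_percpu(size, sizeof(void *));	/* NULL on failure */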
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (slab_state >= FULL)
return enable_cpucache(cachep, gfp);
+ cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
+ if (!cachep->cpu_cache)
+ return 1;
+
if (slab_state == DOWN) {
- /*
- * Note: Creation of first cache (kmem_cache).
- * The setup_node is taken care
- * of by the caller of __kmem_cache_create
- */
- cachep->array[smp_processor_id()] = &initarray_generic.cache;
- slab_state = PARTIAL;
+ /* Creation of first cache (kmem_cache). */
+ set_up_node(kmem_cache, CACHE_CACHE);
} else if (slab_state == PARTIAL) {
- /*
- * Note: the second kmem_cache_create must create the cache
- * that's used by kmalloc(24), otherwise the creation of
- * further caches will BUG().
- */
- cachep->array[smp_processor_id()] = &initarray_generic.cache;
-
- /*
- * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
- * the second cache, then we need to set up all its node/,
- * otherwise the creation of further caches will BUG().
- */
- set_up_node(cachep, SIZE_AC);
- if (INDEX_AC == INDEX_NODE)
- slab_state = PARTIAL_NODE;
- else
- slab_state = PARTIAL_ARRAYCACHE;
+ /* For kmem_cache_node */
+ set_up_node(cachep, SIZE_NODE);
} else {
- /* Remaining boot caches */
- cachep->array[smp_processor_id()] =
- kmalloc(sizeof(struct arraycache_init), gfp);
+ int node;
- if (slab_state == PARTIAL_ARRAYCACHE) {
- set_up_node(cachep, SIZE_NODE);
- slab_state = PARTIAL_NODE;
- } else {
- int node;
- for_each_online_node(node) {
- cachep->node[node] =
- kmalloc_node(sizeof(struct kmem_cache_node),
- gfp, node);
- BUG_ON(!cachep->node[node]);
- kmem_cache_node_init(cachep->node[node]);
- }
+ for_each_online_node(node) {
+ cachep->node[node] = kmalloc_node(
+ sizeof(struct kmem_cache_node), gfp, node);
+ BUG_ON(!cachep->node[node]);
+ kmem_cache_node_init(cachep->node[node]);
}
}
+
cachep->node[numa_mem_id()]->next_reap =
jiffies + REAPTIMEOUT_NODE +
((unsigned long)cachep) % REAPTIMEOUT_NODE;
@@ -2100,6 +2044,32 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
return 0;
}
+unsigned long kmem_cache_flags(unsigned long object_size,
+ unsigned long flags, const char *name,
+ void (*ctor)(void *))
+{
+ return flags;
+}
+
+struct kmem_cache *
+__kmem_cache_alias(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
+{
+ struct kmem_cache *cachep;
+
+ cachep = find_mergeable(size, align, flags, name, ctor);
+ if (cachep) {
+ cachep->refcount++;
+
+ /*
+ * Adjust the object sizes so that we clear
+ * the complete object on kzalloc.
+ */
+ cachep->object_size = max_t(int, cachep->object_size, size);
+ }
+ return cachep;
+}
+
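kmem_cache_flags() and __kmem_cache_alias() are the two hooks the common kmem_cache_create() path in mm/slab_common.c uses for cache merging, so SLAB gains the aliasing behaviour SLUB already had: a new cache whose size, alignment and flags are compatible with an existing one can share that cache. Roughly (hypothetical cache names, sizes illustrative):

	struct kmem_cache *a, *b;

	a = kmem_cache_create("foo_cache", 96, 0, 0, NULL);
	b = kmem_cache_create("bar_cache", 96, 0, 0, NULL);
	/* b may now equal a; refcount tracks the alias and object_size
	 * is widened so clearing the whole object stays safe */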
/**
* __kmem_cache_create - Create a cache.
* @cachep: cache management descriptor
@@ -2124,7 +2094,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
- size_t left_over, freelist_size, ralign;
+ size_t left_over, freelist_size;
+ size_t ralign = BYTES_PER_WORD;
gfp_t gfp;
int err;
size_t size = cachep->size;
@@ -2157,14 +2128,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
size &= ~(BYTES_PER_WORD - 1);
}
- /*
- * Redzoning and user store require word alignment or possibly larger.
- * Note this will be overridden by architecture or caller mandated
- * alignment if either is greater than BYTES_PER_WORD.
- */
- if (flags & SLAB_STORE_USER)
- ralign = BYTES_PER_WORD;
-
if (flags & SLAB_RED_ZONE) {
ralign = REDZONE_ALIGN;
/* If redzoning, ensure that the second redzone is suitably
@@ -2190,7 +2153,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
else
gfp = GFP_NOWAIT;
- setup_node_pointer(cachep);
#if DEBUG
/*
@@ -2447,8 +2409,7 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
if (rc)
return rc;
- for_each_online_cpu(i)
- kfree(cachep->array[i]);
+ free_percpu(cachep->cpu_cache);
/* NUMA: free the node structures */
for_each_kmem_cache_node(cachep, i, n) {
@@ -2994,7 +2955,7 @@ out:
#ifdef CONFIG_NUMA
/*
- * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set.
+ * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
*
* If we are in_interrupt, then process context, including cpusets and
* mempolicy, may not apply and should not be used for allocation policy.
@@ -3226,7 +3187,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
void *objp;
- if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
+ if (current->mempolicy || cpuset_do_slab_mem_spread()) {
objp = alternate_node_alloc(cache, flags);
if (objp)
goto out;
@@ -3406,7 +3367,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
return;
- if (likely(ac->avail < ac->limit)) {
+ if (ac->avail < ac->limit) {
STATS_INC_FREEHIT(cachep);
} else {
STATS_INC_FREEMISS(cachep);
@@ -3503,7 +3464,6 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node, _RET_IP_);
@@ -3516,13 +3476,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#else
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __do_kmalloc_node(size, flags, node, 0);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
#endif /* CONFIG_NUMA */
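Both __kmalloc_node() variants now pass _RET_IP_, so the old #else stubs that passed a 0 caller were only discarding information, never saving work; the #ifdef split can simply go. For reference, _RET_IP_ is the compiler's return-address intrinsic:

	/* include/linux/kernel.h */
	#define _RET_IP_	(unsigned long)__builtin_return_address(0)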
/**
@@ -3548,8 +3501,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
return ret;
}
-
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, _RET_IP_);
@@ -3562,14 +3513,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
}
EXPORT_SYMBOL(__kmalloc_track_caller);
-#else
-void *__kmalloc(size_t size, gfp_t flags)
-{
- return __do_kmalloc(size, flags, 0);
-}
-EXPORT_SYMBOL(__kmalloc);
-#endif
-
/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
@@ -3714,72 +3657,45 @@ fail:
return -ENOMEM;
}
-struct ccupdate_struct {
- struct kmem_cache *cachep;
- struct array_cache *new[0];
-};
-
-static void do_ccupdate_local(void *info)
-{
- struct ccupdate_struct *new = info;
- struct array_cache *old;
-
- check_irq_off();
- old = cpu_cache_get(new->cachep);
-
- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
- new->new[smp_processor_id()] = old;
-}
-
/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp)
{
- struct ccupdate_struct *new;
- int i;
+ struct array_cache __percpu *cpu_cache, *prev;
+ int cpu;
- new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
- gfp);
- if (!new)
+ cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
+ if (!cpu_cache)
return -ENOMEM;
- for_each_online_cpu(i) {
- new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
- batchcount, gfp);
- if (!new->new[i]) {
- for (i--; i >= 0; i--)
- kfree(new->new[i]);
- kfree(new);
- return -ENOMEM;
- }
- }
- new->cachep = cachep;
-
- on_each_cpu(do_ccupdate_local, (void *)new, 1);
+ prev = cachep->cpu_cache;
+ cachep->cpu_cache = cpu_cache;
+ kick_all_cpus_sync();
check_irq_on();
cachep->batchcount = batchcount;
cachep->limit = limit;
cachep->shared = shared;
- for_each_online_cpu(i) {
+ if (!prev)
+ goto alloc_node;
+
+ for_each_online_cpu(cpu) {
LIST_HEAD(list);
- struct array_cache *ccold = new->new[i];
int node;
struct kmem_cache_node *n;
+ struct array_cache *ac = per_cpu_ptr(prev, cpu);
- if (!ccold)
- continue;
-
- node = cpu_to_mem(i);
+ node = cpu_to_mem(cpu);
n = get_node(cachep, node);
spin_lock_irq(&n->list_lock);
- free_block(cachep, ccold->entry, ccold->avail, node, &list);
+ free_block(cachep, ac->entry, ac->avail, node, &list);
spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
- kfree(ccold);
}
- kfree(new);
+ free_percpu(prev);
+
+alloc_node:
return alloc_kmem_cache_node(cachep, gfp);
}
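The ccupdate IPI dance is gone: the new percpu array is published with a plain pointer store, then kick_all_cpus_sync() waits until every CPU has taken an interrupt. Since cpu_cache_get() is only used with interrupts disabled, once the IPI has been serviced no CPU can still be inside a section that saw the old pointer, and the old arrays can be drained and freed. The core of the pattern, annotated:

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;	/* publish the new arrays */
	kick_all_cpus_sync();		/* every irq-off reader of the old
					 * pointer has finished after this */
	/* ... free_block() the leftovers in 'prev', then free_percpu(prev) */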
@@ -4262,19 +4178,15 @@ static const struct seq_operations slabstats_op = {
static int slabstats_open(struct inode *inode, struct file *file)
{
- unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
- int ret = -ENOMEM;
- if (n) {
- ret = seq_open(file, &slabstats_op);
- if (!ret) {
- struct seq_file *m = file->private_data;
- *n = PAGE_SIZE / (2 * sizeof(unsigned long));
- m->private = n;
- n = NULL;
- }
- kfree(n);
- }
- return ret;
+ unsigned long *n;
+
+ n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
+ if (!n)
+ return -ENOMEM;
+
+ *n = PAGE_SIZE / (2 * sizeof(unsigned long));
+
+ return 0;
}
static const struct file_operations proc_slabstats_operations = {
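__seq_open_private() bundles seq_open() with a zeroed, kzalloc'd ->private of the requested size and returns that buffer (or NULL), which is what lets the old open/alloc/rollback dance collapse into one call; the matching teardown is seq_release_private(). A minimal usage sketch with hypothetical names:

	static int example_open(struct inode *inode, struct file *file)
	{
		struct example_priv *p;

		p = __seq_open_private(file, &example_seq_ops, sizeof(*p));
		if (!p)
			return -ENOMEM;
		p->pos = 0;		/* private state arrives zeroed */
		return 0;
	}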