Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  171
1 file changed, 44 insertions(+), 127 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 6d5c83c6ddd..2c3a2e0394d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -162,23 +162,6 @@
*/
static bool pfmemalloc_active __read_mostly;
-/* Legal flag mask for kmem_cache_create(). */
-#if DEBUG
-# define CREATE_MASK (SLAB_RED_ZONE | \
- SLAB_POISON | SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | \
- SLAB_STORE_USER | \
- SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#else
-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | \
- SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#endif
-
/*
* kmem_bufctl_t:
*
@@ -564,15 +547,11 @@ static struct cache_names __initdata cache_names[] = {
#undef CACHE
};
-static struct arraycache_init initarray_cache __initdata =
- { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
-static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
static struct kmem_cache kmem_cache_boot = {
- .nodelists = kmem_cache_nodelists,
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
@@ -1577,28 +1556,33 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
}
/*
+ * The memory after the last cpu cache pointer is used for the
+ * nodelists pointer.
+ */
+static void setup_nodelists_pointer(struct kmem_cache *cachep)
+{
+ cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+}
+
+/*
 * Initialisation. Called after the page allocator has been initialised and
* before smp_init().
*/
void __init kmem_cache_init(void)
{
- size_t left_over;
struct cache_sizes *sizes;
struct cache_names *names;
int i;
- int order;
- int node;
kmem_cache = &kmem_cache_boot;
+ setup_nodelists_pointer(kmem_cache);
if (num_possible_nodes() == 1)
use_alien_caches = 0;
- for (i = 0; i < NUM_INIT_LISTS; i++) {
+ for (i = 0; i < NUM_INIT_LISTS; i++)
kmem_list3_init(&initkmem_list3[i]);
- if (i < MAX_NUMNODES)
- kmem_cache->nodelists[i] = NULL;
- }
+
set_up_list3s(kmem_cache, CACHE_CACHE);
/*
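
The new setup_nodelists_pointer() helper depends on how the boot kmem_cache is
sized further down in this patch: the per-node list pointers are placed
directly behind the per-cpu array pointers in the same object. A minimal
sketch of that assumed layout, as an annotation rather than part of the patch:

	/*
	 * Assumed layout of the boot kmem_cache object (sketch only):
	 *
	 *   struct kmem_cache
	 *     ...
	 *     struct array_cache *array[nr_cpu_ids];      <- per-cpu caches
	 *   struct kmem_list3   *nodelists[nr_node_ids];  <- tail of the
	 *                                                     same allocation
	 *
	 * so the allocation size is
	 *   offsetof(struct kmem_cache, array[nr_cpu_ids]) +
	 *   nr_node_ids * sizeof(struct kmem_list3 *)
	 * and ->nodelists can simply be aimed at &cachep->array[nr_cpu_ids].
	 */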
@@ -1629,37 +1613,16 @@ void __init kmem_cache_init(void)
* 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/
- node = numa_mem_id();
-
/* 1) create the kmem_cache */
- INIT_LIST_HEAD(&slab_caches);
- list_add(&kmem_cache->list, &slab_caches);
- kmem_cache->colour_off = cache_line_size();
- kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
- kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
/*
* struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
- kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
- nr_node_ids * sizeof(struct kmem_list3 *);
- kmem_cache->object_size = kmem_cache->size;
- kmem_cache->size = ALIGN(kmem_cache->object_size,
- cache_line_size());
- kmem_cache->reciprocal_buffer_size =
- reciprocal_value(kmem_cache->size);
-
- for (order = 0; order < MAX_ORDER; order++) {
- cache_estimate(order, kmem_cache->size,
- cache_line_size(), 0, &left_over, &kmem_cache->num);
- if (kmem_cache->num)
- break;
- }
- BUG_ON(!kmem_cache->num);
- kmem_cache->gfporder = order;
- kmem_cache->colour = left_over / kmem_cache->colour_off;
- kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
- sizeof(struct slab), cache_line_size());
+ create_boot_cache(kmem_cache, "kmem_cache",
+ offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+ nr_node_ids * sizeof(struct kmem_list3 *),
+ SLAB_HWCACHE_ALIGN);
+ list_add(&kmem_cache->list, &slab_caches);
/* 2+3) create the kmalloc caches */
sizes = malloc_sizes;
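
The open-coded order, colour and slab_size computation for the boot cache is
replaced by create_boot_cache(), a helper added to mm/slab_common.c in the
same series. Its declaration is assumed here for reference, since it is not
part of this file:

	/*
	 * Assumed declaration (from mm/slab_common.c, not shown in this
	 * diff): sizes and initialises a statically allocated boot cache,
	 * panicking on failure.
	 */
	void __init create_boot_cache(struct kmem_cache *s, const char *name,
			size_t size, unsigned long flags);

Here it is fed a size large enough for nr_cpu_ids array pointers plus
nr_node_ids nodelist pointers, matching the layout sketched above.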
@@ -1671,23 +1634,13 @@ void __init kmem_cache_init(void)
* bug.
*/
- sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
- sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
- sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
- sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
- list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
-
- if (INDEX_AC != INDEX_L3) {
- sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
- sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
- sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
- sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
- list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
- }
+ sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+
+ if (INDEX_AC != INDEX_L3)
+ sizes[INDEX_L3].cs_cachep =
+ create_kmalloc_cache(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
slab_early_init = 0;
@@ -1699,24 +1652,14 @@ void __init kmem_cache_init(void)
* Note for systems short on memory removing the alignment will
* allow tighter packing of the smaller caches.
*/
- if (!sizes->cs_cachep) {
- sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes->cs_cachep->name = names->name;
- sizes->cs_cachep->size = sizes->cs_size;
- sizes->cs_cachep->object_size = sizes->cs_size;
- sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
- list_add(&sizes->cs_cachep->list, &slab_caches);
- }
+ if (!sizes->cs_cachep)
+ sizes->cs_cachep = create_kmalloc_cache(names->name,
+ sizes->cs_size, ARCH_KMALLOC_FLAGS);
+
#ifdef CONFIG_ZONE_DMA
- sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes->cs_dmacachep->name = names->name_dma;
- sizes->cs_dmacachep->size = sizes->cs_size;
- sizes->cs_dmacachep->object_size = sizes->cs_size;
- sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes->cs_dmacachep,
- ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
- list_add(&sizes->cs_dmacachep->list, &slab_caches);
+ sizes->cs_dmacachep = create_kmalloc_cache(
+ names->name_dma, sizes->cs_size,
+ SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
#endif
sizes++;
names++;
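
Each of the removed blocks above repeated the same zalloc,
__kmem_cache_create and list_add sequence; create_kmalloc_cache(), also
introduced in mm/slab_common.c, folds that into one call. Roughly, and only
as an assumed sketch of the shared helper:

	/* Sketch of the shared helper (assumed; lives outside this file) */
	struct kmem_cache *__init create_kmalloc_cache(const char *name,
			size_t size, unsigned long flags)
	{
		struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

		if (!s)
			panic("Out of memory when creating slab %s\n", name);
		create_boot_cache(s, name, size, flags);
		list_add(&s->list, &slab_caches);
		return s;
	}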
@@ -1727,7 +1670,6 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
- BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
memcpy(ptr, cpu_cache_get(kmem_cache),
sizeof(struct arraycache_init));
/*
@@ -2282,7 +2224,15 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
if (slab_state == DOWN) {
/*
- * Note: the first kmem_cache_create must create the cache
+ * Note: creation of the first cache (kmem_cache).
+ * The list3s are set up by the caller
+ * of __kmem_cache_create.
+ */
+ cachep->array[smp_processor_id()] = &initarray_generic.cache;
+ slab_state = PARTIAL;
+ } else if (slab_state == PARTIAL) {
+ /*
+ * Note: the second kmem_cache_create must create the cache
* that's used by kmalloc(24), otherwise the creation of
* further caches will BUG().
*/
@@ -2290,7 +2240,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
/*
* If the cache that's used by kmalloc(sizeof(kmem_list3)) is
- * the first cache, then we need to set up all its list3s,
+ * the second cache, then we need to set up all its list3s,
* otherwise the creation of further caches will BUG().
*/
set_up_list3s(cachep, SIZE_AC);
@@ -2299,6 +2249,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
else
slab_state = PARTIAL_ARRAYCACHE;
} else {
+ /* Remaining boot caches */
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init), gfp);
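
For reference, the bootstrap stages that setup_cpu_cache() now walks through;
the names are from enum slab_state in mm/slab.h of this era and are listed
here only as an aid, not as part of the patch:

	/*
	 *   DOWN               - creating kmem_cache itself; use the static
	 *                        initarray_generic array cache
	 *   PARTIAL            - creating the kmalloc cache that backs
	 *                        struct arraycache_init allocations
	 *   PARTIAL_ARRAYCACHE - array caches can now be kmalloc'ed
	 *   PARTIAL_L3         - kmem_list3 structures can be kmalloc'ed
	 *   UP / FULL          - normal operation
	 */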
@@ -2331,11 +2282,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
/**
* __kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
+ * @cachep: cache management descriptor
* @flags: SLAB flags
- * @ctor: A constructor for the objects.
*
* Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
@@ -2378,11 +2326,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(flags & SLAB_POISON);
#endif
- /*
- * Always checks flags, a caller might be expecting debug support which
- * isn't available.
- */
- BUG_ON(flags & ~CREATE_MASK);
/*
* Check that size is in terms of words. This is needed to avoid
@@ -2394,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
size &= ~(BYTES_PER_WORD - 1);
}
- /* calculate the final buffer alignment: */
-
- /* 1) arch recommendation: can be overridden for debug */
- if (flags & SLAB_HWCACHE_ALIGN) {
- /*
- * Default alignment: as specified by the arch code. Except if
- * an object is really small, then squeeze multiple objects into
- * one cacheline.
- */
- ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- } else {
- ralign = BYTES_PER_WORD;
- }
-
/*
* Redzoning and user store require word alignment or possibly larger.
* Note this will be overridden by architecture or caller mandated
@@ -2426,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
size &= ~(REDZONE_ALIGN - 1);
}
- /* 2) arch mandated alignment */
- if (ralign < ARCH_SLAB_MINALIGN) {
- ralign = ARCH_SLAB_MINALIGN;
- }
/* 3) caller mandated alignment */
if (ralign < cachep->align) {
ralign = cachep->align;
@@ -2447,7 +2370,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
else
gfp = GFP_NOWAIT;
- cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+ setup_nodelists_pointer(cachep);
#if DEBUG
/*
@@ -3969,12 +3892,6 @@ void kfree(const void *objp)
}
EXPORT_SYMBOL(kfree);
-unsigned int kmem_cache_size(struct kmem_cache *cachep)
-{
- return cachep->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/