Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  246
1 file changed, 132 insertions, 114 deletions
diff --git a/mm/slab.c b/mm/slab.c
index c6100628a6e..70784b848b6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -793,8 +793,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
* has cs_{dma,}cachep==NULL. Thus no special case
* for large kmalloc calls required.
*/
+#ifdef CONFIG_ZONE_DMA
if (unlikely(gfpflags & GFP_DMA))
return csizep->cs_dmacachep;
+#endif
return csizep->cs_cachep;
}
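
[Editor's note: __find_general_cachep() walks the table of per-size kmalloc caches and, after this hunk, only consults the DMA variant when CONFIG_ZONE_DMA is built in. A minimal standalone sketch of that lookup pattern follows; the types and the GFP_DMA placeholder are simplified stand-ins, not the kernel's exact cache_sizes definitions.]

	#include <stddef.h>

	struct cache;			/* stands in for struct kmem_cache */

	struct size_class {		/* stands in for struct cache_sizes */
		size_t		size;	/* largest object this class serves */
		struct cache	*cachep;
	#ifdef CONFIG_ZONE_DMA
		struct cache	*dma_cachep;
	#endif
	};

	/* Walk the ascending size table until the request fits, then pick the
	 * normal or (only if compiled in) the DMA cache for that size class. */
	static struct cache *find_general_cachep(struct size_class *s,
						 size_t size, unsigned int gfpflags)
	{
		while (size > s->size)
			s++;
	#ifdef CONFIG_ZONE_DMA
		if (gfpflags & 0x01u /* GFP_DMA placeholder */)
			return s->dma_cachep;
	#endif
		return s->cachep;
	}
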
@@ -1493,13 +1495,15 @@ void __init kmem_cache_init(void)
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
}
-
- sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
+#ifdef CONFIG_ZONE_DMA
+ sizes->cs_dmacachep = kmem_cache_create(
+ names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
SLAB_PANIC,
NULL, NULL);
+#endif
sizes++;
names++;
}
@@ -2321,7 +2325,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->slab_size = slab_size;
cachep->flags = flags;
cachep->gfpflags = 0;
- if (flags & SLAB_CACHE_DMA)
+ if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
cachep->gfpflags |= GFP_DMA;
cachep->buffer_size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
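
[Editor's note: the CONFIG_ZONE_DMA_FLAG test used here (and in kmem_flagcheck() below) is an ordinary C expression rather than an #ifdef, so the DMA branch is still parsed and type-checked but compiles away to dead code when DMA zones are disabled. This diff does not show where the macro comes from; presumably it is defined elsewhere in the series along these lines -- an assumption, not taken from this patch:]

	#ifdef CONFIG_ZONE_DMA
	#define CONFIG_ZONE_DMA_FLAG	1
	#else
	#define CONFIG_ZONE_DMA_FLAG	0	/* "if (0)" branches are discarded by the compiler */
	#endif
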
@@ -2516,7 +2520,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
* kmem_cache_destroy - delete a cache
* @cachep: the cache to destroy
*
- * Remove a struct kmem_cache object from the slab cache.
+ * Remove a &struct kmem_cache object from the slab cache.
*
* It is expected this function will be called by a module when it is
* unloaded. This will remove the cache completely, and avoid a duplicate
@@ -2643,10 +2647,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
- if (flags & GFP_DMA)
- BUG_ON(!(cachep->gfpflags & GFP_DMA));
- else
- BUG_ON(cachep->gfpflags & GFP_DMA);
+ if (CONFIG_ZONE_DMA_FLAG) {
+ if (flags & GFP_DMA)
+ BUG_ON(!(cachep->gfpflags & GFP_DMA));
+ else
+ BUG_ON(cachep->gfpflags & GFP_DMA);
+ }
}
static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
@@ -2814,19 +2820,11 @@ failed:
*/
static void kfree_debugcheck(const void *objp)
{
- struct page *page;
-
if (!virt_addr_valid(objp)) {
printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
(unsigned long)objp);
BUG();
}
- page = virt_to_page(objp);
- if (!PageSlab(page)) {
- printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
- (unsigned long)objp);
- BUG();
- }
}
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
@@ -3197,35 +3195,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
return objp;
}
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
- gfp_t flags, void *caller)
-{
- unsigned long save_flags;
- void *objp = NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
-
- local_irq_save(save_flags);
-
- if (unlikely(NUMA_BUILD &&
- current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
- objp = alternate_node_alloc(cachep, flags);
-
- if (!objp)
- objp = ____cache_alloc(cachep, flags);
- /*
- * We may just have run out of memory on the local node.
- * ____cache_alloc_node() knows how to locate memory on other nodes
- */
- if (NUMA_BUILD && !objp)
- objp = ____cache_alloc_node(cachep, flags, numa_node_id());
- local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- caller);
- prefetchw(objp);
- return objp;
-}
-
#ifdef CONFIG_NUMA
/*
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3257,14 +3226,20 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
* allocator to do its reclaim / fallback magic. We then insert the
* slab into the proper nodelist and then allocate from it.
*/
-void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
- struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
- ->node_zonelists[gfp_zone(flags)];
+ struct zonelist *zonelist;
+ gfp_t local_flags;
struct zone **z;
void *obj = NULL;
int nid;
- gfp_t local_flags = (flags & GFP_LEVEL_MASK);
+
+ if (flags & __GFP_THISNODE)
+ return NULL;
+
+ zonelist = &NODE_DATA(slab_node(current->mempolicy))
+ ->node_zonelists[gfp_zone(flags)];
+ local_flags = (flags & GFP_LEVEL_MASK);
retry:
/*
@@ -3374,16 +3349,110 @@ must_grow:
if (x)
goto retry;
- if (!(flags & __GFP_THISNODE))
- /* Unable to grow the cache. Fall back to other nodes. */
- return fallback_alloc(cachep, flags);
-
- return NULL;
+ return fallback_alloc(cachep, flags);
done:
return obj;
}
-#endif
+
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ * @caller: return address of caller, used for debug information
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+ void *caller)
+{
+ unsigned long save_flags;
+ void *ptr;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+ local_irq_save(save_flags);
+
+ if (unlikely(nodeid == -1))
+ nodeid = numa_node_id();
+
+ if (unlikely(!cachep->nodelists[nodeid])) {
+ /* Node not bootstrapped yet */
+ ptr = fallback_alloc(cachep, flags);
+ goto out;
+ }
+
+ if (nodeid == numa_node_id()) {
+ /*
+ * Use the locally cached objects if possible.
+ * However ____cache_alloc does not allow fallback
+ * to other nodes. It may fail while we still have
+ * objects on other nodes available.
+ */
+ ptr = ____cache_alloc(cachep, flags);
+ if (ptr)
+ goto out;
+ }
+ /* ___cache_alloc_node can fall back to other nodes */
+ ptr = ____cache_alloc_node(cachep, flags, nodeid);
+ out:
+ local_irq_restore(save_flags);
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+
+ return ptr;
+}
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+ void *objp;
+
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+ objp = alternate_node_alloc(cache, flags);
+ if (objp)
+ goto out;
+ }
+ objp = ____cache_alloc(cache, flags);
+
+ /*
+ * We may just have run out of memory on the local node.
+ * ____cache_alloc_node() knows how to locate memory on other nodes
+ */
+ if (!objp)
+ objp = ____cache_alloc_node(cache, flags, numa_node_id());
+
+ out:
+ return objp;
+}
+#else
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+ return ____cache_alloc(cachep, flags);
+}
+
+#endif /* CONFIG_NUMA */
+
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+{
+ unsigned long save_flags;
+ void *objp;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+ local_irq_save(save_flags);
+ objp = __do_cache_alloc(cachep, flags);
+ local_irq_restore(save_flags);
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ prefetchw(objp);
+
+ return objp;
+}
/*
* Caller needs to acquire correct kmem_list's list_lock
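
[Editor's note: a hedged usage sketch of the kmem_cache_alloc_node() behaviour documented above. With __GFP_THISNODE set, fallback_alloc() now bails out instead of spilling onto other nodes, so a caller can try a strictly node-local allocation first and relax it explicitly. struct foo and foo_cache are hypothetical placeholders, not part of this patch.]

	#include <linux/slab.h>
	#include <linux/gfp.h>

	struct foo { int val; };		/* hypothetical object */
	static struct kmem_cache *foo_cache;	/* hypothetical cache */

	static struct foo *alloc_foo_on_node(int nid)
	{
		/* Strictly node-local attempt: the __GFP_THISNODE check in
		 * fallback_alloc() above returns NULL rather than falling back. */
		struct foo *f = kmem_cache_alloc_node(foo_cache,
						      GFP_KERNEL | __GFP_THISNODE, nid);
		if (!f)
			/* Relax the constraint: any node with memory will do. */
			f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
		return f;
	}
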
@@ -3582,57 +3651,6 @@ out:
}
#ifdef CONFIG_NUMA
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
-static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- int nodeid, void *caller)
-{
- unsigned long save_flags;
- void *ptr = NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
-
- if (unlikely(nodeid == -1))
- nodeid = numa_node_id();
-
- if (likely(cachep->nodelists[nodeid])) {
- if (nodeid == numa_node_id()) {
- /*
- * Use the locally cached objects if possible.
- * However ____cache_alloc does not allow fallback
- * to other nodes. It may fail while we still have
- * objects on other nodes available.
- */
- ptr = ____cache_alloc(cachep, flags);
- }
- if (!ptr) {
- /* ___cache_alloc_node can fall back to other nodes */
- ptr = ____cache_alloc_node(cachep, flags, nodeid);
- }
- } else {
- /* Node not bootstrapped yet */
- if (!(flags & __GFP_THISNODE))
- ptr = fallback_alloc(cachep, flags);
- }
-
- local_irq_restore(save_flags);
- ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-
- return ptr;
-}
-
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
return __cache_alloc_node(cachep, flags, nodeid,
@@ -3733,6 +3751,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
BUG_ON(virt_to_cache(objp) != cachep);
local_irq_save(flags);
+ debug_check_no_locks_freed(objp, obj_size(cachep));
__cache_free(cachep, objp);
local_irq_restore(flags);
}
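
[Editor's note: the debug_check_no_locks_freed() call added above lets lockdep complain when an object is freed while a lock embedded in it is still held. A hypothetical example of the kind of bug it catches; struct bar and bar_cache are placeholders.]

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct bar {
		spinlock_t lock;	/* lock embedded in the object */
		int data;
	};

	static void buggy_free(struct kmem_cache *bar_cache, struct bar *b)
	{
		spin_lock(&b->lock);
		/* BUG: the object still contains a held lock; with lockdep
		 * enabled, the debug_check_no_locks_freed() call added to
		 * kmem_cache_free() above reports this. */
		kmem_cache_free(bar_cache, b);
	}
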
@@ -4017,18 +4036,17 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
* If we cannot acquire the cache chain mutex then just give up - we'll try
* again on the next iteration.
*/
-static void cache_reap(struct work_struct *unused)
+static void cache_reap(struct work_struct *w)
{
struct kmem_cache *searchp;
struct kmem_list3 *l3;
int node = numa_node_id();
+ struct delayed_work *work =
+ container_of(w, struct delayed_work, work);
- if (!mutex_trylock(&cache_chain_mutex)) {
+ if (!mutex_trylock(&cache_chain_mutex))
/* Give up. Setup the next iteration. */
- schedule_delayed_work(&__get_cpu_var(reap_work),
- round_jiffies_relative(REAPTIMEOUT_CPUC));
- return;
- }
+ goto out;
list_for_each_entry(searchp, &cache_chain, next) {
check_irq_on();
@@ -4071,9 +4089,9 @@ next:
mutex_unlock(&cache_chain_mutex);
next_reap_node();
refresh_cpu_vm_stats(smp_processor_id());
+out:
/* Set up the next iteration */
- schedule_delayed_work(&__get_cpu_var(reap_work),
- round_jiffies_relative(REAPTIMEOUT_CPUC));
+ schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}
#ifdef CONFIG_PROC_FS
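
[Editor's note: the reworked cache_reap() above recovers its struct delayed_work from the work_struct argument via container_of() and rearms itself on a rounded deadline, instead of reaching for the per-CPU reap_work variable on every exit path. A minimal self-rearming delayed-work sketch of the same pattern; the names and the 2*HZ period are arbitrary, not from this patch.]

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void my_reap(struct work_struct *w)
	{
		/* Recover the enclosing delayed_work from its embedded work_struct. */
		struct delayed_work *dwork = container_of(w, struct delayed_work, work);

		/* ... periodic maintenance would go here ... */

		/* Requeue ourselves; round_jiffies_relative() batches nearby wakeups. */
		schedule_delayed_work(dwork, round_jiffies_relative(2 * HZ));
	}

	/* Kicked off elsewhere with schedule_delayed_work(&my_reap_work, delay). */
	static DECLARE_DELAYED_WORK(my_reap_work, my_reap);
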