author     David Rientjes <rientjes@google.com>    2009-02-25 09:16:35 +0200
committer  Pekka Enberg <penberg@cs.helsinki.fi>   2009-02-25 09:16:35 +0200
commit     c0bdb232b23b51c23e551041510ad6bea5ce5a92 (patch)
tree       c904332ff38d1dac3450734a47997b6d0eca07fc /mm/slub.c
parent     73d342b169db700b5a6ad626fe4b86911efec8db (diff)
slub: rename calculate_min_partial() to set_min_partial()
As suggested by Christoph Lameter, rename calculate_min_partial() to
set_min_partial() as the function doesn't really do any calculations.

Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
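For reference, a minimal sketch of the renamed helper as suggested by the hunks below: it only clamps the requested value and stores it on the cache, which is why "set" describes it better than "calculate". Only the MIN_PARTIAL lower bound is visible in the diff; the MAX_PARTIAL upper clamp and the s->min_partial assignment are assumptions about the surrounding code.

/*
 * Sketch of the renamed helper. Only the MIN_PARTIAL check appears in
 * the hunk below; the MAX_PARTIAL clamp and the final assignment are
 * assumed.
 */
static void set_min_partial(struct kmem_cache *s, unsigned long min)
{
        if (min < MIN_PARTIAL)
                min = MIN_PARTIAL;
        else if (min > MAX_PARTIAL)     /* assumed upper clamp */
                min = MAX_PARTIAL;
        s->min_partial = min;           /* assumed field on struct kmem_cache */
}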
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   |   6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a3e2d552ff4..77268d18e78 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2170,7 +2170,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
-static void calculate_min_partial(struct kmem_cache *s, unsigned long min)
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
 {
         if (min < MIN_PARTIAL)
                 min = MIN_PARTIAL;
@@ -2321,7 +2321,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
         * The larger the object size is, the more pages we want on the partial
         * list to avoid pounding the page allocator excessively.
         */
-       calculate_min_partial(s, ilog2(s->size));
+       set_min_partial(s, ilog2(s->size));
        s->refcount = 1;
 #ifdef CONFIG_NUMA
        s->remote_node_defrag_ratio = 1000;
@@ -3853,7 +3853,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
        if (err)
                return err;
 
-       calculate_min_partial(s, min);
+       set_min_partial(s, min);
        return length;
 }
 SLAB_ATTR(min_partial);
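For context, a sketch of the sysfs store path after the rename, assuming the value is parsed with strict_strtoul() (the usual helper in kernels of this vintage) and that the local variable names match; only the error check, the set_min_partial() call, and the return appear in the hunk above.

/*
 * Sketch of the /sys/kernel/slab/<cache>/min_partial store handler after
 * the rename. The strict_strtoul() parse and the local variable names are
 * assumptions; only the tail of the function is visible in the diff.
 */
static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
                                 size_t length)
{
        unsigned long min;
        int err;

        err = strict_strtoul(buf, 10, &min);    /* assumed parse helper */
        if (err)
                return err;

        set_min_partial(s, min);        /* clamp and store the new minimum */
        return length;
}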