author    Christoph Lameter <cl@linux.com>    2011-04-15 14:48:13 -0500
committer Pekka Enberg <penberg@kernel.org>   2011-04-17 14:03:19 +0300
commit    5f80b13ae45df7da6646d1881da186318e70b6b6 (patch)
tree      2c249101bc1bf0020268ca31d09ae198a7031cb1 /mm
parent    33de04ec4cb80b6bd0782e88a64954e60bc15dc1 (diff)
slub: get_map() function to establish map of free objects in a slab
The bit map of free objects in a slab page is determined in various functions
if debugging is enabled. Provide a common function for that purpose.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
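Illustrative sketch (not part of the patch): the pattern that get_map() consolidates
can be reproduced in plain userspace C. Everything below -- toy_slab, toy_get_map(),
NOBJECTS and so on -- is made up for this example and only stands in for
page->freelist, get_freepointer(), slab_index() and the kernel bitmap helpers.

#include <stdio.h>
#include <string.h>

#define NOBJECTS 8
#define OBJ_SIZE sizeof(void *)	/* each toy object only holds its free pointer */

/* Stand-in for a slab page: a freelist head plus a fixed array of objects. */
struct toy_slab {
	void *freelist;			/* head of the linked list of free objects */
	char objects[NOBJECTS][OBJ_SIZE];
};

/* Index of an object within the slab, like slab_index() in slub.c. */
static int toy_index(struct toy_slab *s, void *p)
{
	return (int)(((char *)p - s->objects[0]) / OBJ_SIZE);
}

/*
 * Next free object: the pointer is stored inside the free object itself,
 * which is what get_freepointer() does in slub.c.
 */
static void *toy_next_free(void *p)
{
	void *next;

	memcpy(&next, p, sizeof(next));
	return next;
}

/* The consolidated helper: walk the freelist and set one bit per free object. */
static void toy_get_map(struct toy_slab *s, unsigned long *map)
{
	void *p;

	for (p = s->freelist; p; p = toy_next_free(p))
		*map |= 1UL << toy_index(s, p);
}

int main(void)
{
	struct toy_slab slab = { 0 };
	unsigned long map = 0;
	int free_idx[] = { 1, 3, 6 };
	int i;

	/* Link objects 1, 3 and 6 into the freelist, i.e. mark them free. */
	for (i = 0; i < 3; i++) {
		void *next = slab.freelist;

		memcpy(slab.objects[free_idx[i]], &next, sizeof(next));
		slab.freelist = slab.objects[free_idx[i]];
	}

	toy_get_map(&slab, &map);

	/* A clear bit means the object is in use, as in the converted callers. */
	for (i = 0; i < NOBJECTS; i++)
		printf("object %d: %s\n", i, (map & (1UL << i)) ? "free" : "in use");
	return 0;
}

This also shows why every converted call site in the diff below tests for a clear
bit afterwards: only free objects carry a link to the next free object, so the
freelist can only enumerate the free set, and the allocated objects are found as
its complement by walking every slot with for_each_object().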
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c  34
1 files changed, 22 insertions, 12 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 5284fb77967..837f932671a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -271,10 +271,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
__p += (__s)->size)
-/* Scan freelist */
-#define for_each_free_object(__p, __s, __free) \
- for (__p = (__free); __p; __p = get_freepointer((__s), __p))
-
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
@@ -330,6 +326,21 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
return x.x & OO_MASK;
}
+/*
+ * Determine a map of object in use on a page.
+ *
+ * Slab lock or node listlock must be held to guarantee that the page does
+ * not vanish from under us.
+ */
+static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+{
+ void *p;
+ void *addr = page_address(page);
+
+ for (p = page->freelist; p; p = get_freepointer(s, p))
+ set_bit(slab_index(p, s, addr), map);
+}
+
#ifdef CONFIG_SLUB_DEBUG
/*
* Debug settings:
@@ -2673,9 +2684,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
return;
slab_err(s, page, "%s", text);
slab_lock(page);
- for_each_free_object(p, s, page->freelist)
- set_bit(slab_index(p, s, addr), map);
+ get_map(s, page, map);
for_each_object(p, s, addr, page->objects) {
if (!test_bit(slab_index(p, s, addr), map)) {
@@ -3610,10 +3620,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
/* Now we know that a valid freelist exists */
bitmap_zero(map, page->objects);
- for_each_free_object(p, s, page->freelist) {
- set_bit(slab_index(p, s, addr), map);
- if (!check_object(s, page, p, SLUB_RED_INACTIVE))
- return 0;
+ get_map(s, page, map);
+ for_each_object(p, s, addr, page->objects) {
+ if (test_bit(slab_index(p, s, addr), map))
+ if (!check_object(s, page, p, SLUB_RED_INACTIVE))
+ return 0;
}
for_each_object(p, s, addr, page->objects)
@@ -3821,8 +3832,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
void *p;
bitmap_zero(map, page->objects);
- for_each_free_object(p, s, page->freelist)
- set_bit(slab_index(p, s, addr), map);
+ get_map(s, page, map);
for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map))