author	Eric Dumazet <eric.dumazet@gmail.com>	2011-02-14 18:35:22 +0100
committer	Pekka Enberg <penberg@kernel.org>	2011-02-23 11:59:30 +0200
commit	b3d41885d9cd0d9db31c8f49e362bae02c96fa3f (patch)
tree	ab8f134456c9ae06ee0532a8ff0b7296e4d51c16 /mm/slub.c
parent	63310467a3d1ed6a0460ec1f4268126cd1ceec2e (diff)
slub: fix kmemcheck calls to match ksize() hints
Recent use of ksize() in the network stack (commit ca44ac38: "net: don't reallocate skb->head unless the current one hasn't the needed extra size or is shared") triggers kmemcheck warnings, because ksize() can return more space than kmemcheck is aware of. Pekka Enberg noticed SLAB+kmemcheck is doing the right thing, while SLUB+kmemcheck doesn't.

Bugzilla reference #27212

Reported-by: Christian Casteyde <casteyde.christian@free.fr>
Suggested-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
CC: Changli Gao <xiaosuo@gmail.com>
CC: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
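For context, a minimal sketch of the allocation-reuse pattern that exposed the mismatch. This is a hypothetical caller (grow_in_place is an illustrative name, not the actual net/core/skbuff.c code from commit ca44ac38): the caller trusts ksize() to report the real usable size of an allocation, so kmemcheck must have marked that many bytes at allocation time, not just s->objsize.

#include <linux/slab.h>

/*
 * Hypothetical caller illustrating the ksize() reuse pattern.
 * If kmemcheck only tracked s->objsize bytes when the buffer was
 * allocated, writing into the ksize() slack below would trigger
 * false "uninitialized/untracked memory" warnings under SLUB.
 */
static void *grow_in_place(void *buf, size_t needed, gfp_t gfp)
{
	if (buf && ksize(buf) >= needed)
		return buf;			/* existing slack is enough, reuse it */
	return krealloc(buf, needed, gfp);	/* otherwise reallocate */
}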
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	49
1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index d2f343a54ba..217b5b5338a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -797,10 +797,34 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->objsize, flags, s->flags);
 }
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	/*
+	 * Debugging requires use of the padding between object
+	 * and whatever may come after it.
+	 */
+	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+		return s->objsize;
+
+#endif
+	/*
+	 * If we have the need to store the freelist pointer
+	 * back there or track user information then we can
+	 * only use the space before that information.
+	 */
+	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+		return s->inuse;
+	/*
+	 * Else we can use all the padding etc for the allocation
+	 */
+	return s->size;
+}
+
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, s->objsize);
+	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
 }
@@ -2690,7 +2714,6 @@ EXPORT_SYMBOL(__kmalloc_node);
 size_t ksize(const void *object)
 {
 	struct page *page;
-	struct kmem_cache *s;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
@@ -2701,28 +2724,8 @@ size_t ksize(const void *object)
 		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
 	}
-	s = page->slab;
-
-#ifdef CONFIG_SLUB_DEBUG
-	/*
-	 * Debugging requires use of the padding between object
-	 * and whatever may come after it.
-	 */
-	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
-
-#endif
-	/*
-	 * If we have the need to store the freelist pointer
-	 * back there or track user information then we can
-	 * only use the space before that information.
-	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-		return s->inuse;
-	/*
-	 * Else we can use all the padding etc for the allocation
-	 */
-	return s->size;
+	return slab_ksize(page->slab);
 }
 EXPORT_SYMBOL(ksize);
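As a follow-up illustration only (not part of the patch; the function name is hypothetical and it would have to live in mm/slub.c, since slab_ksize() is a file-local helper): after this change both kmemcheck_slab_alloc() and ksize() derive their size from slab_ksize(), so the two always agree for any combination of SLUB debug flags.

/* Hypothetical consistency check, not part of the patch. */
static void __maybe_unused check_ksize_matches_kmemcheck(struct kmem_cache *s)
{
	void *obj = kmem_cache_alloc(s, GFP_KERNEL);

	if (!obj)
		return;
	/* ksize() on a slab object now reports slab_ksize(page->slab). */
	WARN_ON(ksize(obj) != slab_ksize(s));
	kmem_cache_free(s, obj);
}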