author | James Morris <james.l.morris@oracle.com> | 2014-01-06 22:23:01 +1100
---|---|---
committer | James Morris <james.l.morris@oracle.com> | 2014-01-06 22:23:01 +1100
commit | 38fd2c202a3d82bc12430bce5789fa2c2a406f71 (patch) |
tree | a73513dbb015155f5236b391709b9083916b3136 /mm/slub.c |
parent | dcf4e392867bf98d50ad108ed7c2bfb941e8c33d (diff) |
parent | d6e0a2dd12f4067a5bcefb8bbd8ddbeff800afbc (diff) |
Merge to v3.13-rc7 for prerequisite changes in the Xen code for TPM
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 49 |
1 files changed, 38 insertions, 11 deletions
diff --git a/mm/slub.c b/mm/slub.c
index c3eb3d3ca83..545a170ebf9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /*
  * Maximum number of desirable partial slabs.
  * The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
  */
 #define MAX_PARTIAL 10
 
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -955,7 +965,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
-	 * Trouble is that we may no longer disable interupts in the fast path
+	 * Trouble is that we may no longer disable interrupts in the fast path
 	 * So in order to make the debug calls that expect irqs to be
 	 * disabled we need to disable interrupts temporarily.
 	 */
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
 	/*
 	 * Enable debugging if selected on the kernel commandline.
 	 */
-	if (slub_debug && (!slub_debug_slabs ||
-		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+	if (slub_debug && (!slub_debug_slabs || (name &&
+		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
 		flags |= slub_debug;
 
 	return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
  * slab on the node for this slabcache. There are no concurrent accesses
  * possible.
  *
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
 static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
 
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
@@ -4983,7 +5010,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 	 * through the descendants with best-effort propagation.
 	 */
 	for_each_memcg_cache_index(i) {
-		struct kmem_cache *c = cache_from_memcg(s, i);
+		struct kmem_cache *c = cache_from_memcg_idx(s, i);
 		if (c)
 			attribute->store(c, buf, len);
 	}
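The change routes the kmemleak calls through small inline hooks (kmalloc_large_node_hook(), kfree_hook(), and the slab_*_hook() variants) that are defined in both the CONFIG_SLUB_DEBUG and !CONFIG_SLUB_DEBUG branches, so callers stay identical and leak tracking remains compiled in even when the debug code is not. A minimal user-space sketch of that shape, with the hypothetical trace_alloc()/trace_free(), alloc_hook()/free_hook(), and SLAB_DEBUG_BUILD names standing in for the kmemleak calls and the kernel config option:

```c
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical stand-ins for kmemleak_alloc()/kmemleak_free(): both build
 * variants below keep calling them, mirroring how the patch keeps kmemleak
 * tracking alive when the debug code is compiled out.
 */
static inline void trace_alloc(const void *ptr, size_t size)
{
	fprintf(stderr, "alloc %zu bytes at %p\n", size, (void *)ptr);
}

static inline void trace_free(const void *ptr)
{
	fprintf(stderr, "free %p\n", (void *)ptr);
}

#ifdef SLAB_DEBUG_BUILD
/* "Debug" variant: extra consistency checks could run before tracing. */
static inline void alloc_hook(const void *ptr, size_t size)
{
	/* ... sanity checks would go here ... */
	trace_alloc(ptr, size);
}

static inline void free_hook(const void *ptr)
{
	trace_free(ptr);
}
#else
/* "Production" variant: no checks, but tracing is still present. */
static inline void alloc_hook(const void *ptr, size_t size)
{
	trace_alloc(ptr, size);
}

static inline void free_hook(const void *ptr)
{
	trace_free(ptr);
}
#endif

int main(void)
{
	/* Callers never care which variant of the hooks was built in. */
	void *p = malloc(64);

	alloc_hook(p, 64);
	free_hook(p);
	free(p);
	return 0;
}
```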