path: root/include/linux/memcontrol.h
author	Olof Johansson <olof@lixom.net>	2014-07-19 14:59:07 -0700
committer	Olof Johansson <olof@lixom.net>	2014-07-19 14:59:07 -0700
commit	4e9816d012dbc28dc89559261c6ffbf8ffc440dd (patch)
tree	dee9f8b31f3d6d2fb141541da88e1cc1329b017e /include/linux/memcontrol.h
parent	da98f44f27d81d7fe9a41f69af4fe08c18d13b56 (diff)
parent	1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
Merge tag 'v3.16-rc5' into next/fixes-non-critical
Linux 3.16-rc5
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--	include/linux/memcontrol.h	32
1 file changed, 6 insertions(+), 26 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b569b8be5c5..eb65d29516c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -492,13 +492,9 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
-char *memcg_create_cache_name(struct mem_cgroup *memcg,
- struct kmem_cache *root_cache);
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s);
-void memcg_register_cache(struct kmem_cache *s);
-void memcg_unregister_cache(struct kmem_cache *s);
int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);
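The hunk above trims the per-memcg cache API down to an alloc/free pair for the cache parameters. A minimal sketch of the expected pairing in a cache-creation path follows; the helper name and the later failure step are hypothetical, only the two memcg_*_cache_params() calls come from this header:

/* Hypothetical setup path, for illustration only. */
static int memcg_cache_setup_sketch(struct mem_cgroup *memcg,
				    struct kmem_cache *s,
				    struct kmem_cache *root_cache)
{
	int err;

	/* Allocate the per-memcg bookkeeping attached to @s. */
	err = memcg_alloc_cache_params(memcg, s, root_cache);
	if (err)
		return err;

	if (some_later_setup_fails(s)) {	/* assumed follow-up step */
		memcg_free_cache_params(s);	/* undo on error */
		return -ENOMEM;
	}

	return 0;
}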
@@ -506,8 +502,10 @@ void memcg_update_array_size(int num_groups);
struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
-void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
-int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);
+int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
+void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
+
+int __memcg_cleanup_cache_params(struct kmem_cache *s);
/**
* memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
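The replacement API pairs an explicit charge and uncharge per slab page: __memcg_charge_slab() bills the cache's owning memcg before the pages are taken, and __memcg_uncharge_slab() gives the charge back. A rough caller sketch, with the surrounding page handling assumed rather than taken from this patch:

/* Illustrative only: charge before allocating, roll back on failure. */
static struct page *alloc_charged_slab_page(struct kmem_cache *cachep,
					    gfp_t gfp, int order)
{
	struct page *page;

	if (__memcg_charge_slab(cachep, gfp, order))
		return NULL;				/* memcg refused the charge */

	page = alloc_pages(gfp, order);
	if (!page)
		__memcg_uncharge_slab(cachep, order);	/* undo the charge */

	return page;
}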
@@ -534,7 +532,7 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
* res_counter_charge_nofail, but we hope those allocations are rare,
* and won't be worth the trouble.
*/
- if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
+ if (gfp & __GFP_NOFAIL)
return true;
if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
return true;
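With __GFP_KMEMCG gone from the test, accounting is no longer keyed off a GFP flag; the helper only bails out early for __GFP_NOFAIL, interrupt context and kernel threads. Callers that want accounting follow a charge-then-commit pattern around the page allocation, roughly as below (a sketch, not code from this patch):

static struct page *alloc_accounted_pages_sketch(gfp_t gfp, unsigned int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page *page;

	/* Ask the current task's memcg whether this allocation may proceed. */
	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;

	page = alloc_pages(gfp, order);
	/* Bind the charge to the page, or release it if the allocation failed. */
	memcg_kmem_commit_charge(page, memcg, order);
	return page;
}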
@@ -583,17 +581,7 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
* @cachep: the original global kmem cache
* @gfp: allocation flags.
*
- * This function assumes that the task allocating, which determines the memcg
- * in the page allocator, belongs to the same cgroup throughout the whole
- * process. Misacounting can happen if the task calls memcg_kmem_get_cache()
- * while belonging to a cgroup, and later on changes. This is considered
- * acceptable, and should only happen upon task migration.
- *
- * Before the cache is created by the memcg core, there is also a possible
- * imbalance: the task belongs to a memcg, but the cache being allocated from
- * is the global cache, since the child cache is not yet guaranteed to be
- * ready. This case is also fine, since in this case the GFP_KMEMCG will not be
- * passed and the page allocator will not attempt any cgroup accounting.
+ * All memory allocated from a per-memcg cache is charged to the owner memcg.
*/
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
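The shorter comment matches the new accounting model: pages backing a per-memcg cache are always charged to that cache's owner, so memcg_kmem_get_cache() only has to route the caller to the right clone of the root cache. A sketch of a calling allocation path (names assumed, not from this header):

static void *kmem_alloc_sketch(struct kmem_cache *cachep, gfp_t gfp)
{
	/* Replace the root cache with the current task's per-memcg clone, if any. */
	cachep = memcg_kmem_get_cache(cachep, gfp);

	/* Any page later charged for this cache lands on its owning memcg. */
	return kmem_cache_alloc(cachep, gfp);
}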
@@ -648,14 +636,6 @@ static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}
-static inline void memcg_register_cache(struct kmem_cache *s)
-{
-}
-
-static inline void memcg_unregister_cache(struct kmem_cache *s)
-{
-}
-
static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
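The final hunk drops the now-unused register/unregister stubs from the !CONFIG_MEMCG_KMEM branch. The remaining memcg_kmem_get_cache() stub, whose body the hunk cuts off, follows the usual no-op pattern and presumably just hands back the root cache so callers need no ifdefs:

/* Assumed shape of the !CONFIG_MEMCG_KMEM stub (the hunk ends before its body). */
static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;	/* no per-memcg clones when kmem accounting is compiled out */
}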