Diffstat (limited to 'mm/memcontrol.c')
 mm/memcontrol.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9b648bd6345..23b5fa4cabd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -533,6 +533,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup_per_zone *mz;
 
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	/*
 	 * Should page_cgroup's go to their own slab?
 	 * One could optimize the performance of the charging routine
@@ -665,6 +668,9 @@ void mem_cgroup_uncharge_page(struct page *page)
 	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	/*
 	 * Check if our page_cgroup is valid
 	 */
@@ -705,6 +711,9 @@ int mem_cgroup_prepare_migration(struct page *page)
 {
 	struct page_cgroup *pc;
 
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	lock_page_cgroup(page);
 	pc = page_get_page_cgroup(page);
 	if (pc)
@@ -803,6 +812,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 	int ret = -EBUSY;
 	int node, zid;
 
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	css_get(&mem->css);
 	/*
 	 * page reclaim code (kswapd etc..) will move pages between
@@ -1053,6 +1065,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
 				struct cgroup *cont)
 {
+	if (mem_cgroup_subsys.disabled)
+		return 0;
 	return cgroup_add_files(cont, ss, mem_cgroup_files,
 				ARRAY_SIZE(mem_cgroup_files));
 }
@@ -1065,6 +1079,9 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	struct mm_struct *mm;
 	struct mem_cgroup *mem, *old_mem;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	mm = get_task_mm(p);
 	if (mm == NULL)
 		return;
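
Every hook in this patch applies the same guard: when the memory controller is disabled at boot (the cgroup core sets mem_cgroup_subsys.disabled for any subsystem named in the cgroup_disable= kernel command-line option), each entry point bails out immediately, and the int-returning charge and migration paths report 0 so their callers proceed as if the operation had succeeded. Below is a minimal, self-contained userspace sketch of that pattern; the struct and function names are illustrative stand-ins, not the kernel API.

/*
 * Sketch of the "disabled subsystem" guard: a single per-subsystem
 * flag turns every entry point into a successful no-op.
 */
#include <stdbool.h>
#include <stdio.h>

struct subsys_stub {
	bool disabled;		/* stand-in for mem_cgroup_subsys.disabled */
};

static struct subsys_stub mem_subsys;

/* charge-style hook: report success without doing any accounting */
static int charge_page(void)
{
	if (mem_subsys.disabled)
		return 0;	/* caller proceeds as if the charge succeeded */
	printf("accounting a page\n");
	return 0;
}

/* uncharge-style hook: void return, so simply bail out */
static void uncharge_page(void)
{
	if (mem_subsys.disabled)
		return;
	printf("releasing a page\n");
}

int main(void)
{
	mem_subsys.disabled = true;	/* as if booted with cgroup_disable=memory */
	if (charge_page() == 0)
		printf("charge reported success while disabled\n");
	uncharge_page();		/* silently a no-op */
	return 0;
}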