author    David S. Miller <davem@davemloft.net>  2012-03-18 23:29:41 -0400
committer David S. Miller <davem@davemloft.net>  2012-03-18 23:29:41 -0400
commit    4da0bd736552e6377b407b3c3d3ae518ebbdd269 (patch)
tree      f0da9f843b8033565c3ca4103fccb17a60688326 /mm/memcontrol.c
parent    81a430ac1b88b0702c57d2513e247317e810e04d (diff)
parent    c16fa4f2ad19908a47c63d8fa436a1178438c7e7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  55
1 file changed, 48 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5585dc3d364..58a08fc7414 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -230,10 +230,30 @@ struct mem_cgroup {
* the counter to account for memory usage
*/
struct res_counter res;
- /*
- * the counter to account for mem+swap usage.
- */
- struct res_counter memsw;
+
+ union {
+ /*
+ * the counter to account for mem+swap usage.
+ */
+ struct res_counter memsw;
+
+ /*
+ * rcu_freeing is used only when freeing struct mem_cgroup,
+ * so put it into a union to avoid wasting more memory.
+ * It must be disjoint from the css field. It could be
+ * in a union with the res field, but res plays a much
+ * larger part in mem_cgroup life than memsw, and might
+ * be of interest, even at time of free, when debugging.
+ * So share rcu_head with the less interesting memsw.
+ */
+ struct rcu_head rcu_freeing;
+ /*
+ * But when using vfree(), that cannot be done at
+ * interrupt time, so we must then queue the work.
+ */
+ struct work_struct work_freeing;
+ };
+
/*
* Per cgroup active and inactive list, similar to the
* per zone LRU lists.
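
Note on the union above: it costs no extra space, because rcu_freeing and
work_freeing simply reuse memsw's bytes, which are dead once teardown begins.
The two free-time fields also overlay each other, which is safe because RCU is
finished with the rcu_head by the time the callback runs and reinitializes the
same bytes as a work item. A purely illustrative sketch of the three strictly
ordered lifetimes sharing one slot (not code from the patch):

	union free_time_overlay {
		struct res_counter memsw;	/* valid while the memcg is live */
		struct rcu_head rcu_freeing;	/* valid from call_rcu() until the callback runs */
		struct work_struct work_freeing;/* valid from INIT_WORK() until vfree() returns */
	};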
@@ -4780,6 +4800,27 @@ out_free:
}

/*
+ * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * but in process context. The work_freeing structure is overlaid
+ * on the rcu_freeing structure, which itself is overlaid on memsw.
+ */
+static void vfree_work(struct work_struct *work)
+{
+ struct mem_cgroup *memcg;
+
+ memcg = container_of(work, struct mem_cgroup, work_freeing);
+ vfree(memcg);
+}
+static void vfree_rcu(struct rcu_head *rcu_head)
+{
+ struct mem_cgroup *memcg;
+
+ memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
+ INIT_WORK(&memcg->work_freeing, vfree_work);
+ schedule_work(&memcg->work_freeing);
+}
+
+/*
* At destroying mem_cgroup, references from swap_cgroup can remain.
* (scanning all at force_empty is too costly...)
*
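
Why two helpers: call_rcu() callbacks run in softirq context, where vfree()
cannot be used, so vfree_rcu() only queues the real free and vfree_work() does
it later from process context. A minimal self-contained sketch of the same
pattern as a hypothetical module snippet (the big_obj names are invented, not
the patch's):

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/vmalloc.h>
	#include <linux/workqueue.h>

	struct big_obj {
		union {
			struct rcu_head rcu;	/* used first, during the grace period */
			struct work_struct work;	/* then reused to reach process context */
		};
		char payload[1 << 20];		/* large enough to come from vmalloc() */
	};

	static void big_obj_free_work(struct work_struct *work)
	{
		vfree(container_of(work, struct big_obj, work));	/* process context: safe */
	}

	static void big_obj_free_rcu(struct rcu_head *rcu)
	{
		struct big_obj *obj = container_of(rcu, struct big_obj, rcu);

		INIT_WORK(&obj->work, big_obj_free_work);	/* softirq: no vfree() here */
		schedule_work(&obj->work);
	}

	static void big_obj_free(struct big_obj *obj)
	{
		call_rcu(&obj->rcu, big_obj_free_rcu);	/* readers may still hold obj */
	}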
@@ -4802,9 +4843,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)

free_percpu(memcg->stat);
if (sizeof(struct mem_cgroup) < PAGE_SIZE)
- kfree(memcg);
+ kfree_rcu(memcg, rcu_freeing);
else
- vfree(memcg);
+ call_rcu(&memcg->rcu_freeing, vfree_rcu);
}

static void mem_cgroup_get(struct mem_cgroup *memcg)
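
The small-object branch needs no helper at all: kfree_rcu() records the offset
of rcu_freeing inside the struct and has the RCU machinery call plain kfree()
after the grace period. The size test must mirror the allocation side, which
in this era picks kzalloc() or vzalloc() with the same comparison; a
simplified sketch of that pairing (hypothetical name, error handling omitted):

	static struct mem_cgroup *alloc_sketch(void)
	{
		int size = sizeof(struct mem_cgroup);

		if (size < PAGE_SIZE)
			return kzalloc(size, GFP_KERNEL);	/* freed via kfree_rcu() */
		return vzalloc(size);	/* freed via call_rcu() + vfree() */
	}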
@@ -5075,7 +5116,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
return NULL;
if (PageAnon(page)) {
/* we don't move shared anon */
- if (!move_anon() || page_mapcount(page) > 2)
+ if (!move_anon() || page_mapcount(page) > 1)
return NULL;
} else if (!move_file())
/* we ignore mapcount for file pages */
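
On the last hunk: page_mapcount() reports how many ptes map the page, so an
anonymous page is already shared once a second mm maps it, at mapcount 2. The
corrected cutoff of "> 1" refuses to move the charge of any shared anon page,
where the old "> 2" wrongly let a doubly-mapped page through. A hedged
paraphrase of the fixed check (helper name invented here):

	static bool can_move_anon_charge(struct page *page)
	{
		return move_anon() && page_mapcount(page) <= 1;
	}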