author    Ben Blum <bblum@google.com>  2009-09-23 15:56:29 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-24 07:20:58 -0700
commit    c378369d8b4fa516ff2b1e79c3eded4e0e955ebb (patch)
tree      33dff6fa14c5b70cf25368b1bde57e5610a308b3
parent    d1d9fd3308fdef6b4bf564fa3d6cfe35b68b50bc (diff)
cgroups: change css_set freeing mechanism to be under RCU
Changes css_set freeing mechanism to be under RCU

This is a prepatch for making the procs file writable. In order to free the
old css_sets for each task to be moved as they're being moved, the freeing
mechanism must be RCU-protected, or else we would have to have a call to
synchronize_rcu() for each task before freeing its old css_set.

Signed-off-by: Ben Blum <bblum@google.com>
Signed-off-by: Paul Menage <menage@google.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
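The point of the message above is the contrast between blocking in synchronize_rcu() once per task and deferring each free with call_rcu(). A minimal kernel-style sketch of that deferred-free pattern is shown below; it is an illustration only, not part of the patch, and "struct foo", free_foo_rcu() and put_foo() are hypothetical names standing in for css_set and its helpers.

    /* Sketch of RCU-deferred freeing, assuming kernel context.
     * 'struct foo', free_foo_rcu() and put_foo() are hypothetical
     * names used only to illustrate the pattern applied to css_set
     * in the patch below.
     */
    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int data;
            struct rcu_head rcu_head;       /* embedded for call_rcu() */
    };

    static void free_foo_rcu(struct rcu_head *head)
    {
            /* Runs after a grace period has elapsed, so no reader that
             * obtained a reference under rcu_read_lock() can still be
             * using the object. */
            kfree(container_of(head, struct foo, rcu_head));
    }

    static void put_foo(struct foo *f)
    {
            /* Non-blocking: queue the free and return immediately,
             * instead of waiting in synchronize_rcu() on every call. */
            call_rcu(&f->rcu_head, free_foo_rcu);
    }

The embedded rcu_head is what lets call_rcu() find the enclosing object again in the callback via container_of(); the patch adds exactly such a field to struct css_set and replaces the direct kfree() in __put_css_set() with a call_rcu() invocation.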
 include/linux/cgroup.h |  3 +++
 kernel/cgroup.c        |  8 +++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 88e86346072..3ac78a2f4b5 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -260,6 +260,9 @@ struct css_set {
* during subsystem registration (at boot time).
*/
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+ /* For RCU-protected deletion */
+ struct rcu_head rcu_head;
};
/*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3e356b05b2d..bf8dd1a9f2d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -267,6 +267,12 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
return &css_set_table[index];
}
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+ struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+ kfree(cg);
+}
+
/* We don't maintain the lists running through each css_set to its
* task until after the first call to cgroup_iter_start(). This
* reduces the fork()/exit() overhead for people who have cgroups
@@ -310,7 +316,7 @@ static void __put_css_set(struct css_set *cg, int taskexit)
}
write_unlock(&css_set_lock);
- kfree(cg);
+ call_rcu(&cg->rcu_head, free_css_set_rcu);
}
/*