| author | Tejun Heo <tj@kernel.org> | 2013-04-07 09:29:51 -0700 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-04-07 09:29:51 -0700 |
| commit | 47cfcd0922454e49f4923b1e2d31a5bf199237c3 | |
| tree | 2b257e98a90b584fb8bd04a1ff1619cb27f394ae /kernel/cgroup.c | |
| parent | b9777cf8d7c7854c3c38bd6621d993b85c2afcdf | |
cgroup: kill cgroup_[un]lock()
Now that the locking interface is unexported, there's no reason to keep
these thin wrappers around. Kill them and use mutex operations
directly.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- | kernel/cgroup.c | 41 |
1 file changed, 11 insertions(+), 30 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 32ca0304452..1a65958c1a0 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -333,8 +333,8 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
  * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
  * @cgrp: the cgroup to be checked for liveness
  *
- * On success, returns true; the lock should be later released with
- * cgroup_unlock(). On failure returns false with no lock held.
+ * On success, returns true; the mutex should be later unlocked. On
+ * failure returns false with no lock held.
  */
 static bool cgroup_lock_live_group(struct cgroup *cgrp)
 {
@@ -819,25 +819,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
  * update of a tasks cgroup pointer by cgroup_attach_task()
  */
 
-/**
- * cgroup_lock - lock out any changes to cgroup structures
- *
- */
-static void cgroup_lock(void)
-{
-        mutex_lock(&cgroup_mutex);
-}
-
-/**
- * cgroup_unlock - release lock on cgroup changes
- *
- * Undo the lock taken in a previous cgroup_lock() call.
- */
-static void cgroup_unlock(void)
-{
-        mutex_unlock(&cgroup_mutex);
-}
-
 /*
  * A couple of forward declarations required, due to cyclic reference loop:
  * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
@@ -1967,8 +1948,8 @@ static void cgroup_task_migrate(struct cgroup *oldcgrp,
  * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
  * task_lock of @tsk or each thread in the threadgroup individually in turn.
  */
-int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
-                       bool threadgroup)
+static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
+                              bool threadgroup)
 {
         int retval, i, group_size;
         struct cgroup_subsys *ss, *failed_ss = NULL;
@@ -2191,7 +2172,7 @@ retry_find_task:
         put_task_struct(tsk);
 out_unlock_cgroup:
-        cgroup_unlock();
+        mutex_unlock(&cgroup_mutex);
         return ret;
 }
 
@@ -2205,7 +2186,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
         struct cgroupfs_root *root;
         int retval = 0;
 
-        cgroup_lock();
+        mutex_lock(&cgroup_mutex);
         for_each_active_root(root) {
                 struct cgroup *from_cg = task_cgroup_from_root(from, root);
 
@@ -2213,7 +2194,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
                 if (retval)
                         break;
         }
-        cgroup_unlock();
+        mutex_unlock(&cgroup_mutex);
 
         return retval;
 }
@@ -2240,7 +2221,7 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
         mutex_lock(&cgroup_root_mutex);
         strcpy(cgrp->root->release_agent_path, buffer);
         mutex_unlock(&cgroup_root_mutex);
-        cgroup_unlock();
+        mutex_unlock(&cgroup_mutex);
         return 0;
 }
 
@@ -2251,7 +2232,7 @@ static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
                 return -ENODEV;
         seq_puts(seq, cgrp->root->release_agent_path);
         seq_putc(seq, '\n');
-        cgroup_unlock();
+        mutex_unlock(&cgroup_mutex);
         return 0;
 }
 
@@ -3271,9 +3252,9 @@ static void cgroup_transfer_one_task(struct task_struct *task,
 {
         struct cgroup *new_cgroup = scan->data;
 
-        cgroup_lock();
+        mutex_lock(&cgroup_mutex);
         cgroup_attach_task(new_cgroup, task, false);
-        cgroup_unlock();
+        mutex_unlock(&cgroup_mutex);
 }
 
 /**
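As a userspace illustration of the pattern this patch removes (not the cgroup code itself), here is a minimal sketch: a file-local lock that used to sit behind one-line lock()/unlock() wrappers is instead taken and released directly at each call site. It uses pthreads rather than the kernel mutex API, and all names (demo_mutex, demo_lock, attach_all_demo) are hypothetical.

```c
/*
 * Illustrative sketch only -- pthreads, not kernel code, hypothetical names.
 * It mirrors the shape of the change: trivial wrappers around a file-local
 * mutex are dropped and callers use the mutex operations directly.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

#if 0
/* Before: thin wrappers that add nothing once the lock is file-local. */
static void demo_lock(void)   { pthread_mutex_lock(&demo_mutex); }
static void demo_unlock(void) { pthread_mutex_unlock(&demo_mutex); }
#endif

/* After: operate on the mutex directly, analogous to the patch's
 * mutex_lock(&cgroup_mutex) / mutex_unlock(&cgroup_mutex) call sites. */
static int attach_all_demo(int task_id)
{
        int ret = 0;

        pthread_mutex_lock(&demo_mutex);
        printf("migrating task %d under the lock\n", task_id);
        pthread_mutex_unlock(&demo_mutex);

        return ret;
}

int main(void)
{
        return attach_all_demo(42);
}
```

Built with `cc -pthread demo.c`, this prints one line and exits 0; the point is only that the wrapper layer disappears while the locking discipline at each call site stays identical.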