author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-05-25 14:45:27 +0200
committer  Ingo Molnar <mingo@elte.hu>                2009-05-25 14:55:01 +0200
commit     6ab423e0eaca827fbd201ca4ae7d4f8573a366b2 (patch)
tree       072d227934bf213adf5c0dd022369e14f273dc48 /kernel
parent     771d7cde144d87f2d1fbee4da3c6234d61f7e42a (diff)
perf_counter: Propagate inheritance failures down the fork() path
Fail fork() when we fail inheritance for some reason (-ENOMEM most likely).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525124600.324656474@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
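[Editor's note: with this patch applied, a failed counter inheritance aborts
copy_process() and the error surfaces as a failed fork() in user space. A
minimal, hypothetical user-space sketch of what a caller would then observe
(illustrative only; the actual errno depends on why inheritance failed,
-ENOMEM being the most likely per the message above):]

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		/* After this patch, a failed perf_counter inheritance in
		 * copy_process() propagates here, typically as ENOMEM. */
		perror("fork");
		return 1;
	}
	if (pid == 0)
		_exit(0);	/* child */
	return 0;		/* parent */
}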
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c          |  6 +++++-
-rw-r--r--  kernel/perf_counter.c  | 20 ++++++++++++--------
2 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 675e01e9072..c07c3335cea 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1095,7 +1095,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
-	perf_counter_init_task(p);
+
+	retval = perf_counter_init_task(p);
+	if (retval)
+		goto bad_fork_cleanup_policy;
 
 	if ((retval = audit_alloc(p)))
 		goto bad_fork_cleanup_policy;
@@ -1295,6 +1298,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
 	audit_free(p);
 bad_fork_cleanup_policy:
+	perf_counter_exit_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 217dbcce2eb..7a7a144870e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3434,18 +3434,23 @@ again:
 /*
  * Initialize the perf_counter context in task_struct
  */
-void perf_counter_init_task(struct task_struct *child)
+int perf_counter_init_task(struct task_struct *child)
 {
 	struct perf_counter_context *child_ctx, *parent_ctx;
 	struct perf_counter *counter;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	int ret = 0;
 
 	child->perf_counter_ctxp = NULL;
 
 	mutex_init(&child->perf_counter_mutex);
 	INIT_LIST_HEAD(&child->perf_counter_list);
 
+	parent_ctx = parent->perf_counter_ctxp;
+	if (likely(!parent_ctx || !parent_ctx->nr_counters))
+		return 0;
+
 	/*
 	 * This is executed from the parent task context, so inherit
 	 * counters that have been marked for cloning.
@@ -3454,11 +3459,7 @@ void perf_counter_init_task(struct task_struct *child)
 
 	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
 	if (!child_ctx)
-		return;
-
-	parent_ctx = parent->perf_counter_ctxp;
-	if (likely(!parent_ctx || !parent_ctx->nr_counters))
-		return;
+		return -ENOMEM;
 
 	__perf_counter_init_context(child_ctx, child);
 	child->perf_counter_ctxp = child_ctx;
@@ -3482,8 +3483,9 @@ void perf_counter_init_task(struct task_struct *child)
 			continue;
 		}
 
-		if (inherit_group(counter, parent,
-				  parent_ctx, child, child_ctx)) {
+		ret = inherit_group(counter, parent, parent_ctx,
+				    child, child_ctx);
+		if (ret) {
 			inherited_all = 0;
 			break;
 		}
@@ -3505,6 +3507,8 @@ void perf_counter_init_task(struct task_struct *child)
 	}
 
 	mutex_unlock(&parent_ctx->mutex);
+
+	return ret;
 }
 
 static void __cpuinit perf_counter_init_cpu(int cpu)