-rw-r--r--  kernel/sched.c                     47
-rw-r--r--  kernel/sched_fair.c                 3
-rw-r--r--  kernel/trace/trace_sched_switch.c  10
3 files changed, 2 insertions, 58 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 1ec3fb2efee..ad95cca4e42 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2412,53 +2412,6 @@ static int sched_balance_self(int cpu, int flag)
#endif /* CONFIG_SMP */
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-
-void ftrace_task(struct task_struct *p, void *__tr, void *__data)
-{
-#if 0
- /*
- * trace timeline tree
- */
- __trace_special(__tr, __data,
- p->pid, p->se.vruntime, p->se.sum_exec_runtime);
-#else
- /*
- * trace balance metrics
- */
- __trace_special(__tr, __data,
- p->pid, p->se.avg_overlap, 0);
-#endif
-}
-
-void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
-{
- struct task_struct *p;
- struct sched_entity *se;
- struct rb_node *curr;
- struct rq *rq = __rq;
-
- if (rq->cfs.curr) {
- p = task_of(rq->cfs.curr);
- ftrace_task(p, __tr, __data);
- }
- if (rq->cfs.next) {
- p = task_of(rq->cfs.next);
- ftrace_task(p, __tr, __data);
- }
-
- for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
- se = rb_entry(curr, struct sched_entity, run_node);
- if (!entity_is_task(se))
- continue;
-
- p = task_of(se);
- ftrace_task(p, __tr, __data);
- }
-}
-
-#endif
-
/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index dc1856f1079..e24ecd39c4b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1061,8 +1061,6 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
if (!(this_sd->flags & SD_WAKE_AFFINE))
return 0;
- ftrace_special(__LINE__, curr->se.avg_overlap, sync);
- ftrace_special(__LINE__, p->se.avg_overlap, -1);
/*
* If the currently running task will sleep within
* a reasonable amount of time then attract this newly
@@ -1240,7 +1238,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
if (unlikely(se == pse))
return;
- ftrace_special(__LINE__, p->pid, se->last_wakeup);
cfs_rq_of(pse)->next = pse;
/*
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index bddf676914e..5671db0e182 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
+ if (likely(disabled == 1))
tracing_sched_switch_trace(tr, data, prev, next, flags);
- if (trace_flags & TRACE_ITER_SCHED_TREE)
- ftrace_all_fair_tasks(__rq, tr, data);
- }
atomic_dec(&data->disabled);
local_irq_restore(flags);
@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
+ if (likely(disabled == 1))
tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
- if (trace_flags & TRACE_ITER_SCHED_TREE)
- ftrace_all_fair_tasks(__rq, tr, data);
- }
atomic_dec(&data->disabled);
local_irq_restore(flags);
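
Both trace_sched_switch.c hunks keep the existing guard: the event is recorded only when atomic_inc_return(&data->disabled) returns 1, and once the ftrace_all_fair_tasks() call is removed the guarded branch holds a single statement, so the braces go away. Below is a minimal userspace sketch of that guard pattern, assuming C11 atomics; record_event and trace_disabled are illustrative names only, not kernel APIs, and a single global counter stands in for the kernel's per-cpu data->disabled.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Sketch of the guard kept by the hunks above.  The kernel uses a
 * per-cpu data->disabled counter; a single global is used here only
 * to keep the example self-contained.
 */
static atomic_long trace_disabled;

static void record_event(const char *what)
{
	/*
	 * atomic_fetch_add() returns the old value, so "+ 1" mimics
	 * the kernel's atomic_inc_return().
	 */
	long disabled = atomic_fetch_add(&trace_disabled, 1) + 1;

	if (disabled == 1)
		printf("trace: %s\n", what);	/* stands in for tracing_sched_switch_trace() */

	atomic_fetch_sub(&trace_disabled, 1);
}

int main(void)
{
	record_event("sched_switch prev=A next=B");
	return 0;
}

If recording an event were itself to recurse into record_event() before the decrement, the nested call would see a count greater than one and skip the write, which is what makes the guarded path safe to run from the scheduler.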