-rw-r--r--   kernel/sched.c      4
-rw-r--r--   kernel/sched_rt.c   4
2 files changed, 4 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 2fe98ed474d..fd18f395a1b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -124,7 +124,7 @@
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
 		return 1;
 	return 0;
 }
@@ -2486,7 +2486,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	if (p->sched_class->task_woken)
 		p->sched_class->task_woken(rq, p);
 
-	if (unlikely(rq->idle_stamp)) {
+	if (rq->idle_stamp) {
 		u64 delta = rq->clock - rq->idle_stamp;
 		u64 max = 2*sysctl_sched_migration_cost;
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 88725c939e0..08e937496b2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1126,7 +1126,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 
 	rt_rq = &rq->rt;
 
-	if (unlikely(!rt_rq->rt_nr_running))
+	if (!rt_rq->rt_nr_running)
 		return NULL;
 
 	if (rt_rq_throttled(rt_rq))
@@ -1544,7 +1544,7 @@ skip:
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+	if (rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }
 
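
Note (not part of the commit itself): all four hunks drop an unlikely() branch-prediction hint from a scheduler path where the annotated condition is not reliably rare in practice, so the hint could bias code layout against the common case. The pre_schedule_rt() hunk additionally drops the rt_task(prev) test, which is redundant in that path because pre_schedule() dispatches through prev's own sched_class hook. The sketch below is only an illustration of what the macro does; it assumes GCC's __builtin_expect() builtin, and the SCHED_* constants and main() are included here purely so the example compiles and runs on its own, roughly mirroring the rt_policy() hunk.

/*
 * Stand-alone sketch, not kernel code: the kernel defines its hints in
 * include/linux/compiler.h as thin wrappers around GCC's __builtin_expect().
 */
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Policy values copied here only for the self-contained example. */
#define SCHED_FIFO	1
#define SCHED_RR	2

/*
 * Mirrors the rt_policy() hunk above: before the patch the whole test was
 * wrapped in unlikely(); on systems that actually run RT tasks the condition
 * is common, so the hint steered the compiler toward the wrong fast path.
 */
static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

int main(void)
{
	printf("rt_policy(SCHED_FIFO) = %d\n", rt_policy(SCHED_FIFO));
	printf("rt_policy(0)          = %d\n", rt_policy(0));
	return 0;
}

__builtin_expect() only pays off when the prediction is right nearly all of the time; when a branch goes both ways, leaving the decision to the compiler's heuristics and the hardware branch predictor is the safer default, which appears to be the rationale for removing the hints here.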