author:    Ingo Molnar <mingo@elte.hu>    2009-12-16 18:33:49 +0100
committer: Ingo Molnar <mingo@elte.hu>    2009-12-16 18:33:49 +0100
commit:    ee1156c11a1121e118b0a7f2dec240f0d421b1fd
tree:      b8771cc5a9758af9d7410fc519227c036c222130 /kernel/sched.c
parent:    b9f8fcd55bbdb037e5332dbdb7b494f0b70861ac
parent:    8bea8672edfca7ec5f661cafb218f1205863b343
Merge branch 'linus' into sched/urgent
Conflicts:
	kernel/sched_idletask.c

Merge reason: resolve the conflicts, pick up latest changes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	231
1 file changed, 117 insertions(+), 114 deletions(-)
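
The diff below applies one mechanical pattern throughout: locks that must keep spinning even where ordinary spinlocks may become sleeping locks (notably under PREEMPT_RT) — the runqueue lock, the RT-bandwidth lock and the per-rt_rq runtime lock — change type from spinlock_t to raw_spinlock_t, and every call site moves to the matching raw_spin_*() helper. A minimal sketch of that pattern, not taken from the patch (the demo_rq struct and functions are invented for illustration):

	#include <linux/spinlock.h>

	struct demo_rq {
		raw_spinlock_t lock;		/* was: spinlock_t lock; */
		unsigned long nr_running;
	};

	static void demo_rq_init(struct demo_rq *rq)
	{
		raw_spin_lock_init(&rq->lock);	/* was: spin_lock_init() */
		rq->nr_running = 0;
	}

	static void demo_rq_tick(struct demo_rq *rq)
	{
		unsigned long flags;

		/* was: spin_lock_irqsave()/spin_unlock_irqrestore() */
		raw_spin_lock_irqsave(&rq->lock, flags);
		rq->nr_running++;
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}
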
diff --git a/kernel/sched.c b/kernel/sched.c
index db5c26692dd..9c30858b646 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -143,7 +143,7 @@ struct rt_prio_array {
struct rt_bandwidth {
/* nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
@@ -180,7 +180,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
- spin_lock_init(&rt_b->rt_runtime_lock);
+ raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -202,7 +202,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
if (hrtimer_active(&rt_b->rt_period_timer))
return;
- spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;
@@ -219,7 +219,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
@@ -300,7 +300,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
@@ -472,7 +472,7 @@ struct rt_rq {
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
@@ -527,7 +527,7 @@ static struct root_domain def_root_domain;
*/
struct rq {
/* runqueue lock: */
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -687,7 +687,7 @@ inline void update_rq_clock(struct rq *rq)
*/
int runqueue_is_locked(int cpu)
{
- return spin_is_locked(&cpu_rq(cpu)->lock);
+ return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}
/*
@@ -895,7 +895,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -919,9 +919,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
#else
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
#endif
}
@@ -951,10 +951,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
{
for (;;) {
struct rq *rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
}
@@ -971,10 +971,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
}
@@ -983,19 +983,19 @@ void task_rq_unlock_wait(struct task_struct *p)
struct rq *rq = task_rq(p);
smp_mb(); /* spin-unlock-wait is not a full memory barrier */
- spin_unlock_wait(&rq->lock);
+ raw_spin_unlock_wait(&rq->lock);
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
/*
@@ -1008,7 +1008,7 @@ static struct rq *this_rq_lock(void)
local_irq_disable();
rq = this_rq();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
return rq;
}
@@ -1055,10 +1055,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
@@ -1071,10 +1071,10 @@ static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
/*
@@ -1181,7 +1181,7 @@ static void resched_task(struct task_struct *p)
{
int cpu;
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
@@ -1203,10 +1203,10 @@ static void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!spin_trylock_irqsave(&rq->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
@@ -1275,7 +1275,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
@@ -1602,11 +1602,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
}
@@ -1708,9 +1708,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
if (root_task_group_empty())
return;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
update_shares(sd);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
}
static void update_h_load(long cpu)
@@ -1750,7 +1750,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
@@ -1771,14 +1771,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
int ret = 0;
- if (unlikely(!spin_trylock(&busiest->lock))) {
+ if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
- spin_unlock(&this_rq->lock);
- spin_lock(&busiest->lock);
- spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_unlock(&this_rq->lock);
+ raw_spin_lock(&busiest->lock);
+ raw_spin_lock_nested(&this_rq->lock,
+ SINGLE_DEPTH_NESTING);
ret = 1;
} else
- spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&busiest->lock,
+ SINGLE_DEPTH_NESTING);
}
return ret;
}
@@ -1792,7 +1794,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
@@ -1802,7 +1804,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- spin_unlock(&busiest->lock);
+ raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
#endif
@@ -2025,13 +2027,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
return;
}
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
update_rq_clock(rq);
set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);
@@ -2783,10 +2785,10 @@ static inline void post_schedule(struct rq *rq)
if (rq->post_schedule) {
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->post_schedule = 0;
}
@@ -3068,15 +3070,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
- spin_lock(&rq1->lock);
+ raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
- spin_lock(&rq1->lock);
- spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq1->lock);
+ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock(&rq2->lock);
- spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq2->lock);
+ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
@@ -3093,9 +3095,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
- spin_unlock(&rq1->lock);
+ raw_spin_unlock(&rq1->lock);
if (rq1 != rq2)
- spin_unlock(&rq2->lock);
+ raw_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
@@ -4188,14 +4190,15 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
- spin_lock_irqsave(&busiest->lock, flags);
+ raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock,
+ flags);
all_pinned = 1;
goto out_one_pinned;
}
@@ -4205,7 +4208,7 @@ redo:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance)
wake_up_process(busiest->migration_thread);
@@ -4387,10 +4390,10 @@ redo:
/*
* Should not call ttwu while holding a rq->lock
*/
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
- spin_lock(&this_rq->lock);
+ raw_spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;
@@ -5259,11 +5262,11 @@ void scheduler_tick(void)
sched_clock_tick();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
perf_event_task_tick(curr, cpu);
@@ -5457,7 +5460,7 @@ need_resched_nonpreemptible:
if (sched_feat(HRTICK))
hrtick_clear(rq);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
clear_tsk_need_resched(prev);
@@ -5493,7 +5496,7 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
@@ -6324,7 +6327,7 @@ recheck:
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*/
- spin_lock_irqsave(&p->pi_lock, flags);
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
/*
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.
@@ -6334,7 +6337,7 @@ recheck:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
update_rq_clock(rq);
@@ -6358,7 +6361,7 @@ recheck:
check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
rt_mutex_adjust_pi(p);
@@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
- _raw_spin_unlock(&rq->lock);
+ do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched();
schedule();
@@ -6978,7 +6981,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->se.exec_start = sched_clock();
@@ -6990,7 +6993,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
@@ -7207,10 +7210,10 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
}
@@ -7222,7 +7225,7 @@ static int migration_thread(void *data)
head = &rq->migration_queue;
if (list_empty(head)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;
@@ -7231,14 +7234,14 @@ static int migration_thread(void *data)
list_del_init(head->next);
if (req->task != NULL) {
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
__migrate_task(req->task, cpu, req->dest_cpu);
} else if (likely(cpu == (badcpu = smp_processor_id()))) {
req->dest_cpu = RCU_MIGRATION_GOT_QS;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
} else {
req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
}
local_irq_enable();
@@ -7360,14 +7363,14 @@ void sched_idle_next(void)
* Strictly not necessary since rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -7403,9 +7406,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -7671,13 +7674,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -7702,13 +7705,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
@@ -7718,30 +7721,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* they didn't take sched_hotcpu_mutex. Just wake up
* the requestors.
*/
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
complete(&req->done);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
}
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}
@@ -7965,7 +7968,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
struct root_domain *old_rd = NULL;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
@@ -7991,7 +7994,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);
@@ -8277,14 +8280,14 @@ enum s_alloc {
*/
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
static int
cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *unused)
{
if (sg)
- *sg = &per_cpu(sched_group_cpus, cpu).sg;
+ *sg = &per_cpu(sched_groups, cpu).sg;
return cpu;
}
#endif /* CONFIG_SCHED_SMT */
@@ -9347,13 +9350,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+ plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
- spin_lock_init(&rt_rq->rt_runtime_lock);
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
@@ -9513,7 +9516,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9573,7 +9576,7 @@ void __init sched_init(void)
#elif defined CONFIG_USER_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
init_tg_rt_entry(&init_task_group,
- &per_cpu(init_rt_rq, i),
+ &per_cpu(init_rt_rq_var, i),
&per_cpu(init_sched_rt_entity, i), i, 1,
root_task_group.rt_se[i]);
#endif
@@ -9611,7 +9614,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+ plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
#endif
/*
@@ -9734,13 +9737,13 @@ void normalize_rt_tasks(void)
continue;
}
- spin_lock(&p->pi_lock);
+ raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
- spin_unlock(&p->pi_lock);
+ raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10103,9 +10106,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
struct rq *rq = cfs_rq->rq;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
@@ -10290,18 +10293,18 @@ static int tg_set_bandwidth(struct task_group *tg,
if (err)
goto unlock;
- spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
@@ -10406,15 +10409,15 @@ static int sched_rt_global_constraints(void)
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
- spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
@@ -10705,9 +10708,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
@@ -10723,9 +10726,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
@@ -10959,9 +10962,9 @@ void synchronize_sched_expedited(void)
init_completion(&req->done);
req->task = NULL;
req->dest_cpu = RCU_MIGRATION_NEED_QS;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
list_add(&req->list, &rq->migration_queue);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
wake_up_process(rq->migration_thread);
}
for_each_online_cpu(cpu) {
@@ -10969,11 +10972,11 @@ void synchronize_sched_expedited(void)
req = &per_cpu(rcu_migration_req, cpu);
rq = cpu_rq(cpu);
wait_for_completion(&req->done);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
need_full_sync = 1;
req->dest_cpu = RCU_MIGRATION_IDLE;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
synchronize_sched_expedited_count++;
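
The double_rq_lock()/_double_lock_balance() hunks above keep the existing deadlock-avoidance rule — when two runqueue locks are needed, the lock at the lower address is always taken first — and only switch it to the raw API. A hedged sketch of that ordering rule (demo_* names are invented; the real code locks struct rq, not bare locks):

	#include <linux/spinlock.h>

	/*
	 * Acquire two raw spinlocks in a stable global order (by address),
	 * so two CPUs contending for the same pair cannot deadlock.
	 * Mirrors the logic of double_rq_lock() in the diff above.
	 */
	static void demo_double_lock(raw_spinlock_t *a, raw_spinlock_t *b)
	{
		if (a == b) {
			raw_spin_lock(a);
		} else if (a < b) {
			raw_spin_lock(a);
			raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(b);
			raw_spin_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}

	static void demo_double_unlock(raw_spinlock_t *a, raw_spinlock_t *b)
	{
		raw_spin_unlock(a);
		if (a != b)
			raw_spin_unlock(b);
	}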