Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c        | 2
-rw-r--r--   kernel/sched_cpupri.c | 2
-rw-r--r--   kernel/sched_debug.c  | 6
-rw-r--r--   kernel/sched_fair.c   | 3
4 files changed, 7 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 247fd0fedd0..92e51287b98 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7822,7 +7822,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	free_rootdomain(old_rd);
 }
 
-static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
 {
 	gfp_t gfp = GFP_KERNEL;
 
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 7deffc9f0e5..e6c251790dd 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -152,7 +152,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  *
  * Returns: -ENOMEM if memory fails.
  */
-int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
+int cpupri_init(struct cpupri *cp, bool bootmem)
 {
 	gfp_t gfp = GFP_KERNEL;
 	int i;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 467ca72f165..70c7e0b7994 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -162,7 +162,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
 		spread, rq0_min_vruntime, spread0;
-	struct rq *rq = &per_cpu(runqueues, cpu);
+	struct rq *rq = cpu_rq(cpu);
 	struct sched_entity *last;
 	unsigned long flags;
 
@@ -191,7 +191,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	if (last)
 		max_vruntime = last->vruntime;
 	min_vruntime = cfs_rq->min_vruntime;
-	rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
+	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
 	spin_unlock_irqrestore(&rq->lock, flags);
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
 			SPLIT_NS(MIN_vruntime));
@@ -248,7 +248,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 
 static void print_cpu(struct seq_file *m, int cpu)
 {
-	struct rq *rq = &per_cpu(runqueues, cpu);
+	struct rq *rq = cpu_rq(cpu);
 
 #ifdef CONFIG_X86
 	{
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5f9650e8fe7..ba7fd6e9556 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -430,12 +430,13 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	for_each_sched_entity(se) {
 		struct load_weight *load;
+		struct load_weight lw;
 
 		cfs_rq = cfs_rq_of(se);
 		load = &cfs_rq->load;
 
 		if (unlikely(!se->on_rq)) {
-			struct load_weight lw = cfs_rq->load;
+			lw = cfs_rq->load;
 
 			update_load_add(&lw, se->load.weight);
 			load = &lw;
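What the hunks do, in brief: kernel/sched.c and kernel/sched_cpupri.c drop the __init_refok annotation from init_rootdomain() and cpupri_init(); kernel/sched_debug.c replaces open-coded &per_cpu(runqueues, cpu) lookups with the cpu_rq() accessor that the scheduler code already uses elsewhere; and kernel/sched_fair.c moves the struct load_weight lw declaration out of the if (unlikely(!se->on_rq)) block, so load = &lw no longer leaves load pointing at a variable whose scope has ended by the time load is used later in the loop body.

The fragment below is a minimal user-space sketch of the accessor-macro pattern the sched_debug.c hunks switch to. It is not kernel code: demo_rq, demo_runqueues, demo_cpu_rq() and DEMO_NR_CPUS are invented names, and a plain fixed-size array stands in for real per-CPU storage.

/*
 * Illustrative only -- not kernel code.  A single accessor macro hides
 * how a CPU's runqueue object is located, so call sites never spell
 * out the lookup themselves.
 */
#include <stdio.h>

#define DEMO_NR_CPUS	4

struct demo_rq {
	unsigned long	nr_running;
	long long	min_vruntime;
};

/* Stand-in for the per-CPU runqueue storage. */
static struct demo_rq demo_runqueues[DEMO_NR_CPUS];

/* Analogue of cpu_rq(cpu): the one place that knows how a CPU's rq is found. */
#define demo_cpu_rq(cpu)	(&demo_runqueues[(cpu)])

int main(void)
{
	struct demo_rq *rq = demo_cpu_rq(1);	/* rather than &demo_runqueues[1] */

	rq->nr_running = 3;
	rq->min_vruntime = 123456;

	printf("cpu1: nr_running=%lu min_vruntime=%lld\n",
	       demo_cpu_rq(1)->nr_running, demo_cpu_rq(1)->min_vruntime);
	return 0;
}

Funnelling every lookup through one accessor means later changes to how a CPU's runqueue is stored or reached only have to touch the macro, not the debug and statistics call sites being converted here.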