Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/irq/chip.c          |  5
-rw-r--r-- | kernel/irq/resend.c        |  7
-rw-r--r-- | kernel/kprobes.c           |  5
-rw-r--r-- | kernel/power/snapshot.c    |  3
-rw-r--r-- | kernel/profile.c           |  4
-rw-r--r-- | kernel/sched.c             | 63
-rw-r--r-- | kernel/sched_debug.c       |  2
-rw-r--r-- | kernel/sched_fair.c        | 19
-rw-r--r-- | kernel/sysctl.c            |  2
-rw-r--r-- | kernel/time/clockevents.c  | 10
10 files changed, 64 insertions, 56 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 615ce97c6cf..f1a73f0b54e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -352,13 +352,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
	 * keep it masked and get out of here
	 */
	action = desc->action;
-	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
-		desc->status |= IRQ_PENDING;
+	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;
-	}

	desc->status |= IRQ_INPROGRESS;
-	desc->status &= ~IRQ_PENDING;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 5bfeaed7e48..a8046791ba2 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -62,7 +62,12 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
		 */
		desc->chip->enable(irq);

-	if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+	/*
+	 * We do not resend level type interrupts. Level type
+	 * interrupts are resent by hardware when they are still
+	 * active.
+	 */
+	if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
		desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;

		if (!desc->chip || !desc->chip->retrigger ||
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3e9f513a728..4b8a4493c54 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1063,6 +1063,11 @@ EXPORT_SYMBOL_GPL(register_kprobe);
 EXPORT_SYMBOL_GPL(unregister_kprobe);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
+#ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(jprobe_return);
+#endif
+
+#ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(register_kretprobe);
 EXPORT_SYMBOL_GPL(unregister_kretprobe);
+#endif
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index a3b7854b8f7..a686590d88c 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -709,7 +709,8 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
			region->end_pfn << PAGE_SHIFT);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
-			memory_bm_set_bit(bm, pfn);
+			if (pfn_valid(pfn))
+				memory_bm_set_bit(bm, pfn);
	}
 }

diff --git a/kernel/profile.c b/kernel/profile.c
index 5b20fe977be..cb1e37d2dac 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -199,11 +199,11 @@ EXPORT_SYMBOL_GPL(register_timer_hook);
 EXPORT_SYMBOL_GPL(unregister_timer_hook);
 EXPORT_SYMBOL_GPL(task_handoff_register);
 EXPORT_SYMBOL_GPL(task_handoff_unregister);
+EXPORT_SYMBOL_GPL(profile_event_register);
+EXPORT_SYMBOL_GPL(profile_event_unregister);

 #endif /* CONFIG_PROFILING */

-EXPORT_SYMBOL_GPL(profile_event_register);
-EXPORT_SYMBOL_GPL(profile_event_unregister);

 #ifdef CONFIG_SMP
 /*
diff --git a/kernel/sched.c b/kernel/sched.c
index b0afd8db139..45e17b83b7f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -263,6 +263,7 @@ struct rq {

	unsigned int clock_warps, clock_overflows;
	unsigned int clock_unstable_events;
+	u64 tick_timestamp;

	atomic_t nr_iowait;

@@ -341,8 +342,11 @@ static void __update_rq_clock(struct rq *rq)
		/*
		 * Catch too large forward jumps too:
		 */
-		if (unlikely(delta > 2*TICK_NSEC)) {
-			clock++;
+		if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
+			if (clock < rq->tick_timestamp + TICK_NSEC)
+				clock = rq->tick_timestamp + TICK_NSEC;
+			else
+				clock++;
			rq->clock_overflows++;
		} else {
			if (unlikely(delta > rq->clock_max_delta))
@@ -3102,7 +3106,7 @@ static void run_rebalance_domains(struct softirq_action *h)
			if (need_resched())
				break;

-			rebalance_domains(balance_cpu, SCHED_IDLE);
+			rebalance_domains(balance_cpu, CPU_IDLE);

			rq = cpu_rq(balance_cpu);
			if (time_after(this_rq->next_balance, rq->next_balance))
@@ -3308,9 +3312,16 @@ void scheduler_tick(void)
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;
+	u64 next_tick = rq->tick_timestamp + TICK_NSEC;

	spin_lock(&rq->lock);
	__update_rq_clock(rq);
+	/*
+	 * Let rq->clock advance by at least TICK_NSEC:
+	 */
+	if (unlikely(rq->clock < next_tick))
+		rq->clock = next_tick;
+	rq->tick_timestamp = rq->clock;
	update_cpu_load(rq);
	if (curr != rq->idle) /* FIXME: needed? */
		curr->sched_class->task_tick(rq, curr);
@@ -6317,7 +6328,7 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 }

 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-int arch_reinit_sched_domains(void)
+static int arch_reinit_sched_domains(void)
 {
	int err;

@@ -6346,24 +6357,6 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
	return ret ? ret : count;
 }

-int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
-{
-	int err = 0;
-
-#ifdef CONFIG_SCHED_SMT
-	if (smt_capable())
-		err = sysfs_create_file(&cls->kset.kobj,
-					&attr_sched_smt_power_savings.attr);
-#endif
-#ifdef CONFIG_SCHED_MC
-	if (!err && mc_capable())
-		err = sysfs_create_file(&cls->kset.kobj,
-					&attr_sched_mc_power_savings.attr);
-#endif
-	return err;
-}
-#endif
-
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
 {
@@ -6374,8 +6367,8 @@ static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
 {
	return sched_power_savings_store(buf, count, 0);
 }
-SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
-	    sched_mc_power_savings_store);
+static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+		   sched_mc_power_savings_store);
 #endif

 #ifdef CONFIG_SCHED_SMT
@@ -6388,8 +6381,26 @@ static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
 {
	return sched_power_savings_store(buf, count, 1);
 }
-SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
-	    sched_smt_power_savings_store);
+static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+		   sched_smt_power_savings_store);
+#endif
+
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+	int err = 0;
+
+#ifdef CONFIG_SCHED_SMT
+	if (smt_capable())
+		err = sysfs_create_file(&cls->kset.kobj,
+					&attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+	if (!err && mc_capable())
+		err = sysfs_create_file(&cls->kset.kobj,
+					&attr_sched_mc_power_savings.attr);
+#endif
+	return err;
+}
 #endif

 /*
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 3da32156394..87e524762b8 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -108,7 +108,7 @@ print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)

 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
-	SEQ_printf(m, "\ncfs_rq %p\n", cfs_rq);
+	SEQ_printf(m, "\ncfs_rq\n");

 #define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e91db32cadf..fedbb51bba9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -75,7 +75,7 @@ enum {

 unsigned int sysctl_sched_features __read_mostly =
		SCHED_FEAT_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_SLEEPER_AVG		*1 |
+		SCHED_FEAT_SLEEPER_AVG		*0 |
		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
@@ -304,11 +304,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

	if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-		delta = calc_delta_mine(cfs_rq->sleeper_bonus,
-					curr->load.weight, lw);
-		if (unlikely(delta > cfs_rq->sleeper_bonus))
-			delta = cfs_rq->sleeper_bonus;
-
+		delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
+		delta = calc_delta_mine(delta, curr->load.weight, lw);
+		delta = min((u64)delta, cfs_rq->sleeper_bonus);
		cfs_rq->sleeper_bonus -= delta;
		delta_mine -= delta;
	}
@@ -521,6 +519,8 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
	 * Track the amount of bonus we've given to sleepers:
	 */
	cfs_rq->sleeper_bonus += delta_fair;
+	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+		cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;

	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
@@ -959,13 +959,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq;
-		long imbalances;
+		long imbalance;
		unsigned long maxload;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

-		imbalance = busy_cfs_rq->load.weight -
-						this_cfs_rq->load.weight;
+		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;
@@ -976,7 +975,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);

 #else
-#define maxload rem_load_move
+# define maxload rem_load_move
 #endif
		/* pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 79c891e6266..8bdb8c07e04 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1023,6 +1023,7 @@ static ctl_table vm_table[] = {
		.mode		= 0644,
		.proc_handler	= &proc_doulongvec_minmax,
	},
+#endif
 #ifdef CONFIG_NUMA
	{
		.ctl_name	= CTL_UNNUMBERED,
@@ -1034,7 +1035,6 @@ static ctl_table vm_table[] = {
		.strategy	= &sysctl_string,
	},
 #endif
-#endif
 #if defined(CONFIG_X86_32) || \
    (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
	{
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2ad1c37b8df..41dd3105ce7 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -113,16 +113,6 @@ int clockevents_register_notifier(struct notifier_block *nb)
	return ret;
 }

-/**
- * clockevents_unregister_notifier - unregister a clock events change listener
- */
-void clockevents_unregister_notifier(struct notifier_block *nb)
-{
-	spin_lock(&clockevents_lock);
-	raw_notifier_chain_unregister(&clockevents_chain, nb);
-	spin_unlock(&clockevents_lock);
-}
-
 /*
  * Notify about a clock event change. Called with clockevents_lock
  * held.
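
Note on the sched.c hunks: together, __update_rq_clock() and scheduler_tick() make rq->clock advance monotonically, capping any large forward jump at one tick beyond the last tick timestamp while guaranteeing at least TICK_NSEC of progress per tick. The following is a minimal user-space sketch of that clamping arithmetic only; struct fake_rq, the 1 ms TICK_NSEC value, and the sample timestamps are assumptions for illustration, not kernel code.

/*
 * Standalone sketch of the rq->clock clamping idea from the diff above.
 * Build with: cc -o clamp clamp.c
 */
#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* assume a 1 ms tick for the demo */

struct fake_rq {
	uint64_t clock;		/* scheduler clock, in ns */
	uint64_t prev_raw;	/* last raw timestamp seen */
	uint64_t tick_timestamp;	/* rq->clock at the last tick */
	unsigned int clock_overflows;
};

/* Mirrors the capped-forward-jump branch of __update_rq_clock(). */
static void update_clock(struct fake_rq *rq, uint64_t now)
{
	uint64_t delta = now - rq->prev_raw;
	uint64_t clock = rq->clock;

	if (clock + delta > rq->tick_timestamp + TICK_NSEC) {
		/* Cap a large jump at one tick past the last tick. */
		if (clock < rq->tick_timestamp + TICK_NSEC)
			clock = rq->tick_timestamp + TICK_NSEC;
		else
			clock++;
		rq->clock_overflows++;
	} else {
		clock += delta;
	}
	rq->prev_raw = now;
	rq->clock = clock;
}

/* Mirrors scheduler_tick(): force at least TICK_NSEC of progress. */
static void tick(struct fake_rq *rq, uint64_t now)
{
	uint64_t next_tick = rq->tick_timestamp + TICK_NSEC;

	update_clock(rq, now);
	if (rq->clock < next_tick)
		rq->clock = next_tick;
	rq->tick_timestamp = rq->clock;
}

int main(void)
{
	struct fake_rq rq = { 0 };

	tick(&rq, 1000000);	/* normal tick: clock advances ~1 ms */
	tick(&rq, 9000000);	/* 8 ms raw jump: clamped to one tick */
	printf("clock=%llu overflows=%u\n",
	       (unsigned long long)rq.clock, rq.clock_overflows);
	return 0;
}

With these inputs, the second tick sees an 8 ms raw jump but rq.clock ends at 2000000 ns with clock_overflows=1, illustrating how a TSC glitch cannot drag the scheduler clock far ahead of the tick cadence.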