author    Jiri Kosina <jkosina@suse.cz>  2014-11-20 14:42:02 +0100
committer Jiri Kosina <jkosina@suse.cz>  2014-11-20 14:42:02 +0100
commit    a02001086bbfb4da35d1228bebc2f1b442db455f (patch)
tree      62ab47936cef06fd08657ca5b6cd1df98c19be57 /kernel/time
parent    eff264efeeb0898408e8c9df72d8a32621035bed (diff)
parent    fc14f9c1272f62c3e8d01300f52467c0d9af50f9 (diff)
Merge Linus' tree to be able to apply submitted patches to newer code than
current trivial.git base
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/alarmtimer.c        34
-rw-r--r--  kernel/time/clockevents.c        2
-rw-r--r--  kernel/time/hrtimer.c           23
-rw-r--r--  kernel/time/posix-cpu-timers.c  14
-rw-r--r--  kernel/time/posix-timers.c       1
-rw-r--r--  kernel/time/tick-broadcast.c     2
-rw-r--r--  kernel/time/tick-common.c        7
-rw-r--r--  kernel/time/tick-internal.h      7
-rw-r--r--  kernel/time/tick-oneshot.c       2
-rw-r--r--  kernel/time/tick-sched.c       100
-rw-r--r--  kernel/time/time.c              56
-rw-r--r--  kernel/time/timekeeping.c       10
-rw-r--r--  kernel/time/timer.c              4
13 files changed, 156 insertions, 106 deletions
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4aec4a45743..a7077d3ae52 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
ktime_t now)
{
+ unsigned long flags;
struct k_itimer *ptr = container_of(alarm, struct k_itimer,
it.alarm.alarmtimer);
- if (posix_timer_event(ptr, 0) != 0)
- ptr->it_overrun++;
+ enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+ spin_lock_irqsave(&ptr->it_lock, flags);
+ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+ if (posix_timer_event(ptr, 0) != 0)
+ ptr->it_overrun++;
+ }
/* Re-add periodic timers */
if (ptr->it.alarm.interval.tv64) {
ptr->it_overrun += alarm_forward(alarm, now,
ptr->it.alarm.interval);
- return ALARMTIMER_RESTART;
+ result = ALARMTIMER_RESTART;
}
- return ALARMTIMER_NORESTART;
+ spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+ return result;
}
/**
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
* @new_timer: k_itimer pointer
* @cur_setting: itimerspec data to fill
*
- * Copies the itimerspec data out from the k_itimer
+ * Copies out the current itimerspec data
*/
static void alarm_timer_get(struct k_itimer *timr,
struct itimerspec *cur_setting)
{
- memset(cur_setting, 0, sizeof(struct itimerspec));
+ ktime_t relative_expiry_time =
+ alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
+
+ if (ktime_to_ns(relative_expiry_time) > 0) {
+ cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
+ } else {
+ cur_setting->it_value.tv_sec = 0;
+ cur_setting->it_value.tv_nsec = 0;
+ }
- cur_setting->it_interval =
- ktime_to_timespec(timr->it.alarm.interval);
- cur_setting->it_value =
- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
- return;
+ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
}
/**
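The alarmtimer hunks above do two things: the handler now honors SIGEV_NONE and holds it_lock across the whole callback, and both exit paths funnel through a single unlock. A minimal userspace sketch of that single-exit locking pattern (pthread analogue with illustrative names, not the kernel code):

    #include <pthread.h>
    #include <stdbool.h>

    enum restart { NORESTART, RESTART };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Compute the result under the lock instead of returning from the
     * middle of the critical section; one unlock on the way out. */
    static enum restart handle_timer(bool deliver, bool periodic)
    {
        enum restart result = NORESTART;

        pthread_mutex_lock(&lock);
        if (deliver) {
            /* the posix_timer_event() analogue would run here */
        }
        if (periodic)
            result = RESTART;   /* was an early return under the lock */
        pthread_mutex_unlock(&lock);

        return result;
    }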
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9c94c19f130..55449909f11 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,7 +72,7 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
* Also omit the add if it would overflow the u64 boundary.
*/
if ((~0ULL - clc > rnd) &&
- (!ismax || evt->mult <= (1U << evt->shift)))
+ (!ismax || evt->mult <= (1ULL << evt->shift)))
clc += rnd;
do_div(clc, evt->mult);
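The widening from 1U to 1ULL matters because evt->shift may legally reach 32, and shifting a 32-bit 1 by 32 is undefined behaviour in C. A small standalone illustration (assumed values, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mult = 1;
        unsigned int shift = 32;

        /* (1U << shift) is undefined for shift >= 32 on a 32-bit type;
         * in practice it often wraps, making mult <= (1U << shift)
         * evaluate wrongly.  Promoting to 64 bits is well defined for
         * any shift up to 63. */
        printf("%d\n", mult <= (1ULL << shift));    /* prints 1 */
        return 0;
    }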
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 1c2fe7de284..37e50aadd47 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -558,7 +558,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
static int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
int res;
@@ -629,7 +629,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
*/
static void retrigger_next_event(void *arg)
{
- struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (!hrtimer_hres_active())
return;
@@ -903,7 +903,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
*/
debug_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
- reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
+ reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
/*
* We must preserve the CALLBACK state flag here,
* otherwise we could move the timer base in
@@ -963,7 +963,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
* on dynticks target.
*/
wake_up_nohz_cpu(new_base->cpu_base->cpu);
- } else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+ } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
hrtimer_reprogram(timer, new_base)) {
/*
* Only allow reprogramming if the new base is on this CPU.
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
*/
ktime_t hrtimer_get_next_event(void)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *base = cpu_base->clock_base;
ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
unsigned long flags;
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
memset(timer, 0, sizeof(struct hrtimer));
- cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+ cpu_base = raw_cpu_ptr(&hrtimer_bases);
if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
clock_id = CLOCK_MONOTONIC;
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
struct hrtimer_cpu_base *cpu_base;
int base = hrtimer_clockid_to_base(which_clock);
- cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+ cpu_base = raw_cpu_ptr(&hrtimer_bases);
*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
return 0;
@@ -1242,7 +1242,7 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
*/
void hrtimer_interrupt(struct clock_event_device *dev)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
int i, retries = 0;
@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void)
if (!hrtimer_hres_active())
return;
- td = &__get_cpu_var(tick_cpu_device);
+ td = this_cpu_ptr(&tick_cpu_device);
if (td && td->evtdev)
hrtimer_interrupt(td->evtdev);
}
@@ -1440,7 +1440,7 @@ void hrtimer_run_pending(void)
void hrtimer_run_queues(void)
{
struct timerqueue_node *node;
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *base;
int index, gettime = 1;
@@ -1679,7 +1679,7 @@ static void migrate_hrtimers(int scpu)
local_irq_disable();
old_base = &per_cpu(hrtimer_bases, scpu);
- new_base = &__get_cpu_var(hrtimer_bases);
+ new_base = this_cpu_ptr(&hrtimer_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
@@ -1776,7 +1776,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
*/
if (!expires) {
schedule();
- __set_current_state(TASK_RUNNING);
return -EINTR;
}
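All the hrtimer hunks above are one mechanical conversion: &__get_cpu_var(v) becomes this_cpu_ptr(&v), and &__raw_get_cpu_var(v) becomes raw_cpu_ptr(&v), trading the address-of-lvalue idiom for accessors that can use arch-optimized segment-relative addressing. A sketch of the new idiom with a hypothetical per-CPU variable (kernel context assumed):

    #include <linux/percpu.h>

    struct demo_base { int nr_events; };           /* illustrative type */
    static DEFINE_PER_CPU(struct demo_base, demo_bases);

    /* Callers must already have preemption disabled, as the hrtimer
     * paths do (hard-irq context or irqsave locks).  raw_cpu_ptr()
     * additionally skips the preemption debug check, for early-init
     * or don't-care-which-CPU uses. */
    static struct demo_base *demo_local_base(void)
    {
        return this_cpu_ptr(&demo_bases);  /* was &__get_cpu_var(demo_bases) */
    }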
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 3b8946416a5..492b986195d 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -272,22 +272,8 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
if (same_thread_group(tsk, current))
err = cpu_clock_sample(which_clock, tsk, &rtn);
} else {
- unsigned long flags;
- struct sighand_struct *sighand;
-
- /*
- * while_each_thread() is not yet entirely RCU safe,
- * keep locking the group while sampling process
- * clock for now.
- */
- sighand = lock_task_sighand(tsk, &flags);
- if (!sighand)
- return err;
-
if (tsk == current || thread_group_leader(tsk))
err = cpu_clock_sample_group(which_clock, tsk, &rtn);
-
- unlock_task_sighand(tsk, &flags);
}
if (!err)
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42b463ad90f..31ea01f42e1 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
goto out;
}
} else {
+ memset(&event.sigev_value, 0, sizeof(event.sigev_value));
event.sigev_notify = SIGEV_SIGNAL;
event.sigev_signo = SIGALRM;
event.sigev_value.sival_int = new_timer->it_id;
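sigev_value is a union (sigval), so assigning only the 32-bit sival_int member leaves the remaining bytes of the union holding stale stack data on 64-bit; the added memset zeroes them before the structure can ever be copied back out. The same defensive pattern in standalone form:

    #include <signal.h>
    #include <string.h>

    /* Zero the whole union first so that filling only sival_int cannot
     * leak the upper bytes of sival_ptr later. */
    static void fill_default_event(struct sigevent *ev, int timer_id)
    {
        memset(&ev->sigev_value, 0, sizeof(ev->sigev_value));
        ev->sigev_notify = SIGEV_SIGNAL;
        ev->sigev_signo  = SIGALRM;
        ev->sigev_value.sival_int = timer_id;
    }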
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 64c5990fd50..066f0ec05e4 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -554,7 +554,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
void tick_check_oneshot_broadcast_this_cpu(void)
{
if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
/*
* We might be in the middle of switching over from
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 0a0608edeb2..7efeedf53eb 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -224,7 +224,7 @@ static void tick_setup_device(struct tick_device *td,
void tick_install_replacement(struct clock_event_device *newdev)
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
int cpu = smp_processor_id();
clockevents_exchange_device(td->evtdev, newdev);
@@ -374,14 +374,14 @@ void tick_shutdown(unsigned int *cpup)
void tick_suspend(void)
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
clockevents_shutdown(td->evtdev);
}
void tick_resume(void)
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
int broadcast = tick_resume_broadcast();
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
@@ -400,4 +400,5 @@ void tick_resume(void)
void __init tick_init(void)
{
tick_broadcast_init();
+ tick_nohz_init();
}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index c19c1d84b6f..366aeb4f2c6 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -99,6 +99,13 @@ static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline bool tick_broadcast_oneshot_available(void) { return false; }
#endif /* !TICK_ONESHOT */
+/* NO_HZ_FULL internal */
+#ifdef CONFIG_NO_HZ_FULL
+extern void tick_nohz_init(void);
+# else
+static inline void tick_nohz_init(void) { }
+#endif
+
/*
* Broadcasting support
*/
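The added block follows the standard kernel header idiom for optional features: a real declaration when the option is configured in, an empty static inline stub otherwise, so callers such as tick_init() need no #ifdef of their own. The generic shape (CONFIG_FEATURE_X and feature_x_init are placeholder names):

    #ifdef CONFIG_FEATURE_X
    extern void feature_x_init(void);
    #else
    static inline void feature_x_init(void) { }
    #endif

    /* A caller compiles identically either way: */
    void subsystem_init(void)
    {
        feature_x_init();   /* compiles to a no-op when the feature is out */
    }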
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 824109060a3..7ce740e78e1 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -59,7 +59,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
*/
int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
struct clock_event_device *dev = td->evtdev;
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 99aa6ee3908..7b5741fc411 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -205,7 +205,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
*/
void __tick_nohz_full_check(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (tick_nohz_full_cpu(smp_processor_id())) {
if (ts->tick_stopped && !is_idle_task(current)) {
@@ -225,6 +225,20 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
};
/*
+ * Kick this CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
+ * is NMI safe.
+ */
+void tick_nohz_full_kick(void)
+{
+ if (!tick_nohz_full_cpu(smp_processor_id()))
+ return;
+
+ irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+/*
* Kick the CPU if it's full dynticks in order to force it to
* re-evaluate its dependency on the tick and restart it if necessary.
*/
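The new tick_nohz_full_kick() is NMI-safe because irq_work_queue() only claims the work with an atomic flag and pushes it onto a per-CPU lockless list; the handler then runs from a self-IPI. A sketch of that pattern with illustrative names (kernel context assumed):

    #include <linux/irq_work.h>
    #include <linux/percpu.h>

    static void demo_kick_fn(struct irq_work *work)
    {
        /* runs in hard-irq context shortly after being queued;
         * re-evaluate the tick dependency here */
    }

    static DEFINE_PER_CPU(struct irq_work, demo_kick) = {
        .func = demo_kick_fn,
    };

    static void demo_kick_this_cpu(void)
    {
        irq_work_queue(this_cpu_ptr(&demo_kick));
    }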
@@ -281,22 +295,12 @@ out:
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
- int cpu;
-
alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
- alloc_bootmem_cpumask_var(&housekeeping_mask);
if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
+ free_bootmem_cpumask_var(tick_nohz_full_mask);
return 1;
}
-
- cpu = smp_processor_id();
- if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
- pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
- cpumask_clear_cpu(cpu, tick_nohz_full_mask);
- }
- cpumask_andnot(housekeeping_mask,
- cpu_possible_mask, tick_nohz_full_mask);
tick_nohz_full_running = true;
return 1;
@@ -335,18 +339,11 @@ static int tick_nohz_init_all(void)
#ifdef CONFIG_NO_HZ_FULL_ALL
if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
- pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
- return err;
- }
- if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
- pr_err("NO_HZ: Can't allocate not-full dynticks cpumask\n");
+ WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
return err;
}
err = 0;
cpumask_setall(tick_nohz_full_mask);
- cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
- cpumask_clear(housekeeping_mask);
- cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
tick_nohz_full_running = true;
#endif
return err;
@@ -361,6 +358,37 @@ void __init tick_nohz_init(void)
return;
}
+ if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
+ WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
+ cpumask_clear(tick_nohz_full_mask);
+ tick_nohz_full_running = false;
+ return;
+ }
+
+ /*
+ * Full dynticks uses irq work to drive the tick rescheduling on safe
+ * locking contexts. But then we need irq work to raise its own
+ * interrupts to avoid circular dependency on the tick
+ */
+ if (!arch_irq_work_has_interrupt()) {
+ pr_warning("NO_HZ: Can't run full dynticks because arch doesn't "
+ "support irq work self-IPIs\n");
+ cpumask_clear(tick_nohz_full_mask);
+ cpumask_copy(housekeeping_mask, cpu_possible_mask);
+ tick_nohz_full_running = false;
+ return;
+ }
+
+ cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
+ pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
+ cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+ }
+
+ cpumask_andnot(housekeeping_mask,
+ cpu_possible_mask, tick_nohz_full_mask);
+
for_each_cpu(cpu, tick_nohz_full_mask)
context_tracking_cpu_set(cpu);
@@ -545,7 +573,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
ktime_t last_update, expires, ret = { .tv64 = 0 };
unsigned long rcu_delta_jiffies;
- struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+ struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
u64 time_delta;
time_delta = timekeeping_max_deferment();
@@ -558,7 +586,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
} while (read_seqretry(&jiffies_lock, seq));
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
- arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
+ arch_needs_cpu() || irq_work_needs_cpu()) {
next_jiffies = last_jiffies + 1;
delta_jiffies = 1;
} else {
@@ -813,7 +841,7 @@ void tick_nohz_idle_enter(void)
local_irq_disable();
- ts = &__get_cpu_var(tick_cpu_sched);
+ ts = this_cpu_ptr(&tick_cpu_sched);
ts->inidle = 1;
__tick_nohz_idle_enter(ts);
@@ -831,7 +859,7 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
*/
void tick_nohz_irq_exit(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (ts->inidle)
__tick_nohz_idle_enter(ts);
@@ -846,7 +874,7 @@ void tick_nohz_irq_exit(void)
*/
ktime_t tick_nohz_get_sleep_length(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
return ts->sleep_length;
}
@@ -924,7 +952,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
*/
void tick_nohz_idle_exit(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now;
local_irq_disable();
@@ -959,7 +987,7 @@ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
*/
static void tick_nohz_handler(struct clock_event_device *dev)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
@@ -968,6 +996,10 @@ static void tick_nohz_handler(struct clock_event_device *dev)
tick_sched_do_timer(now);
tick_sched_handle(ts, regs);
+ /* No need to reprogram if we are running tickless */
+ if (unlikely(ts->tick_stopped))
+ return;
+
while (tick_nohz_reprogram(ts, now)) {
now = ktime_get();
tick_do_update_jiffies64(now);
@@ -979,7 +1011,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
*/
static void tick_nohz_switch_to_nohz(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t next;
if (!tick_nohz_enabled)
@@ -1041,7 +1073,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
static inline void tick_nohz_irq_enter(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now;
if (!ts->idle_active && !ts->tick_stopped)
@@ -1095,6 +1127,10 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (regs)
tick_sched_handle(ts, regs);
+ /* No need to reprogram if we are in idle or full dynticks mode */
+ if (unlikely(ts->tick_stopped))
+ return HRTIMER_NORESTART;
+
hrtimer_forward(timer, now, tick_period);
return HRTIMER_RESTART;
@@ -1115,7 +1151,7 @@ early_param("skew_tick", skew_tick);
*/
void tick_setup_sched_timer(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now = ktime_get();
/*
@@ -1184,7 +1220,7 @@ void tick_clock_notify(void)
*/
void tick_oneshot_notify(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
set_bit(0, &ts->check_clocks);
}
@@ -1199,7 +1235,7 @@ void tick_oneshot_notify(void)
*/
int tick_check_oneshot_change(int allow_nohz)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (!test_and_clear_bit(0, &ts->check_clocks))
return 0;
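The reshuffled setup above concentrates the cpumask bookkeeping in tick_nohz_init(): the boot CPU is pulled back out of the nohz_full set so one CPU always keeps timekeeping, and the housekeeping set is simply everything that is not full-dynticks. The mask arithmetic, modelled with plain bitmasks (8 CPUs assumed, not the kernel cpumask API):

    #include <stdio.h>

    int main(void)
    {
        unsigned long possible  = 0xff;     /* CPUs 0-7 */
        unsigned long nohz_full = 0xff;     /* e.g. nohz_full=0-7 on the cmdline */
        unsigned long boot_cpu  = 1UL << 0;

        nohz_full &= ~boot_cpu;             /* cpumask_clear_cpu(): keep a timekeeper */
        unsigned long housekeeping = possible & ~nohz_full;  /* cpumask_andnot() */

        printf("nohz_full=%#lx housekeeping=%#lx\n",
               nohz_full, housekeeping);    /* 0xfe and 0x1 */
        return 0;
    }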
diff --git a/kernel/time/time.c b/kernel/time/time.c
index f0294ba1463..a9ae20fb0b1 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
* that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries. I.e. the line:
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
*
* Rather, we just shift the bits off the right.
*
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
* value to a scaled second value.
*/
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
{
- unsigned long sec = value->tv_sec;
- long nsec = value->tv_nsec + TICK_NSEC - 1;
+ nsec = nsec + TICK_NSEC - 1;
if (sec >= MAX_SEC_IN_JIFFIES){
sec = MAX_SEC_IN_JIFFIES;
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value)
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+ return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
EXPORT_SYMBOL(timespec_to_jiffies);
void
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
}
EXPORT_SYMBOL(jiffies_to_timespec);
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
+ *
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
*/
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
- unsigned long sec = value->tv_sec;
- long usec = value->tv_usec;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- usec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+ return __timespec_to_jiffies(value->tv_sec,
+ value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);
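With the usec value scaled to nanoseconds up front, timeval_to_jiffies() reuses the timespec path, so both conversions share one rounding rule: round the sub-second part up to the next tick. A simplified model of that contract, using a plain division instead of the kernel's scaled fixed-point multiply (HZ assumed 1000):

    #include <stdio.h>

    #define HZ            1000UL
    #define NSEC_PER_SEC  1000000000UL
    #define NSEC_PER_USEC 1000UL
    #define TICK_NSEC     (NSEC_PER_SEC / HZ)

    /* Round the nanosecond part up to the next tick, then convert. */
    static unsigned long to_jiffies(unsigned long sec, unsigned long nsec)
    {
        nsec += TICK_NSEC - 1;
        return sec * HZ + nsec / TICK_NSEC;
    }

    int main(void)
    {
        /* timeval path: scale usec to nsec first, then share the helper */
        printf("%lu\n", to_jiffies(1, 500 * NSEC_PER_USEC));  /* 1001 */
        return 0;
    }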
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f36b02838a4..ec1791fae96 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -338,10 +338,11 @@ EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
static inline void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xt;
+ struct timespec xt, wm;
xt = timespec64_to_timespec(tk_xtime(tk));
- update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
+ wm = timespec64_to_timespec(tk->wall_to_monotonic);
+ update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
tk->tkr.cycle_last);
}
@@ -441,11 +442,12 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
tk->ntp_error = 0;
ntp_clear();
}
- update_vsyscall(tk);
- update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
tk_update_ktime_data(tk);
+ update_vsyscall(tk);
+ update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
+
if (action & TK_MIRROR)
memcpy(&shadow_timekeeper, &tk_core.timekeeper,
sizeof(tk_core.timekeeper));
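wall_to_monotonic is kept as a timespec64 internally, so the legacy update_vsyscall_old() interface, which still takes struct timespec, gets an explicit conversion at the boundary; the reordering also lets the vsyscall and pvclock updates run after tk_update_ktime_data(), presumably so they see fully updated data. A standalone sketch of the narrowing conversion (on 64-bit kernels timespec64 is the same struct as timespec; the definition below is the 32-bit layout, for illustration):

    #include <time.h>

    struct timespec64 { long long tv_sec; long tv_nsec; };  /* illustrative */

    /* tv_sec may truncate where time_t is 32-bit; acceptable for
     * interfaces that are themselves limited to 32-bit time. */
    static struct timespec ts64_to_ts(struct timespec64 ts64)
    {
        struct timespec ts = { (time_t)ts64.tv_sec, ts64.tv_nsec };
        return ts;
    }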
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index aca5dfe2fa3..3260ffdb368 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -655,7 +655,7 @@ static inline void debug_assert_init(struct timer_list *timer)
static void do_init_timer(struct timer_list *timer, unsigned int flags,
const char *name, struct lock_class_key *key)
{
- struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+ struct tvec_base *base = raw_cpu_read(tvec_bases);
timer->entry.next = NULL;
timer->base = (void *)((unsigned long)base | flags);
@@ -1385,7 +1385,7 @@ void update_process_times(int user_tick)
rcu_check_callbacks(cpu, user_tick);
#ifdef CONFIG_IRQ_WORK
if (in_irq())
- irq_work_run();
+ irq_work_tick();
#endif
scheduler_tick();
run_posix_cpu_timers(p);
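tvec_bases is a per-CPU pointer, so do_init_timer() only needs the pointer's value, not a stable address: raw_cpu_read() expresses exactly that, and skipping the preemption check is harmless here because any CPU's base is acceptable when a timer is merely being initialized. (The irq_work_run() to irq_work_tick() switch pairs with irq_work changes elsewhere in this merge, giving the tick its own entry point for pending work.) The shape of the accessor, with hypothetical names:

    #include <linux/percpu.h>

    struct demo_tvec_base;                            /* opaque, illustrative */
    static DEFINE_PER_CPU(struct demo_tvec_base *, demo_bases);

    /* Read the per-CPU pointer without disabling preemption; if we
     * migrate mid-read we still get some valid CPU's base, which is
     * all initialization needs.  (was: __raw_get_cpu_var(demo_bases)) */
    static struct demo_tvec_base *demo_pick_base(void)
    {
        return raw_cpu_read(demo_bases);
    }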