author     Ingo Molnar <mingo@kernel.org>   2012-09-04 14:31:00 +0200
committer  Ingo Molnar <mingo@kernel.org>   2012-09-04 14:31:00 +0200
commit     59f979455d7209171ab10a72c8df5c2512976cb4 (patch)
tree       c4c7fc48bd79bf8acbe848a1b979fa9e8ab4ac6a /arch/arm/kernel
parent     b9bb50db9126c4ccad78af2dfb77277ca17c9b64 (diff)
parent     9450d57eab5cad36774c297da123062744472588 (diff)
Merge branch 'sched/urgent' into sched/core
Merge in the current fixes branch; we are going to apply dependent patches.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/sched_clock.c   24
-rw-r--r--   arch/arm/kernel/topology.c       2
2 files changed, 25 insertions, 1 deletion
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 27d186abbc0..f4515393248 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -21,6 +21,8 @@ struct clock_data {
 	u32 epoch_cyc_copy;
 	u32 mult;
 	u32 shift;
+	bool suspended;
+	bool needs_suspend;
 };
 
 static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
 	u64 epoch_ns;
 	u32 epoch_cyc;
 
+	if (cd.suspended)
+		return cd.epoch_ns;
+
 	/*
 	 * Load the epoch_cyc and epoch_ns atomically. We do this by
 	 * ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)
 	update_sched_clock();
 }
 
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate)
+{
+	setup_sched_clock(read, bits, rate);
+	cd.needs_suspend = true;
+}
+
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
@@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)
 static int sched_clock_suspend(void)
 {
 	sched_clock_poll(sched_clock_timer.data);
+	if (cd.needs_suspend)
+		cd.suspended = true;
 	return 0;
 }
 
+static void sched_clock_resume(void)
+{
+	if (cd.needs_suspend) {
+		cd.epoch_cyc = read_sched_clock();
+		cd.epoch_cyc_copy = cd.epoch_cyc;
+		cd.suspended = false;
+	}
+}
+
 static struct syscore_ops sched_clock_ops = {
 	.suspend = sched_clock_suspend,
+	.resume = sched_clock_resume,
 };
 
 static int __init sched_clock_syscore_init(void)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 198b08456e9..26c12c6440f 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -321,7 +321,7 @@ void store_cpu_topology(unsigned int cpuid)
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
  */
-void init_cpu_topology(void)
+void __init init_cpu_topology(void)
 {
 	unsigned int cpu;
 
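For reference, the sched_clock.c hunks above introduce setup_sched_clock_needs_suspend() as an opt-in variant of setup_sched_clock() for platforms whose counter stops across suspend: while cd.suspended is set, cyc_to_sched_clock() returns the saved epoch_ns, and the new resume hook re-reads the counter so the epoch restarts cleanly. Below is a minimal, hypothetical sketch of how a platform timer driver might register with the new call; the base address, register offset and 24 MHz rate are invented for illustration and are not part of this merge.

/*
 * Hypothetical platform setup (illustration only, not from this commit):
 * register a 32-bit free-running counter as the sched_clock source and
 * opt into the suspend/resume handling added above, because the counter
 * is assumed to stop while the SoC is suspended.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

#define MY_TIMER_BASE	((void __iomem *)0xfe001000)	/* assumed fixed mapping */
#define MY_TIMER_COUNT	0x04				/* assumed counter register */

static u32 my_read_sched_clock(void)
{
	/* Return the raw 32-bit counter value */
	return readl_relaxed(MY_TIMER_BASE + MY_TIMER_COUNT);
}

static void __init my_timer_init(void)
{
	/* 32-bit counter, assumed to tick at 24 MHz and to lose state in suspend */
	setup_sched_clock_needs_suspend(my_read_sched_clock, 32, 24000000);
}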