author     Linus Torvalds <torvalds@linux-foundation.org>   2008-05-24 10:20:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-05-24 10:20:00 -0700
commit     eb90d81d03c0917b0fd629f6342554a3b58ea52c (patch)
tree       4e12232420fa4111937ccd079675ea495d248538 /arch/x86/kernel
parent     d3c5f8b93febadf62da9a4b39a2dca8e66a4da40 (diff)
parent     b1979a5fda7869a790f4fd83fb06c78498d26ba1 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip:
  x86: prevent PGE flush from interruption/preemption
  x86: use explicit copy in vdso_gettimeofday()
  namespacecheck: automated fixes
  x86/xen: fix arbitrary_virt_to_machine()
  x86: don't read maxlvt before checking if APIC is mapped
  x86: disable TSC for sched_clock() when calibration failed
  x86: distangle user disabled TSC from unstable
  x86: fix setup of cyc2ns in tsc_64.c
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic_64.c    2
-rw-r--r--  arch/x86/kernel/kvmclock.c   4
-rw-r--r--  arch/x86/kernel/tsc_32.c    25
-rw-r--r--  arch/x86/kernel/tsc_64.c     5
4 files changed, 21 insertions, 15 deletions
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 5910020c3f2..0633cfd0dc2 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -534,7 +534,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 void clear_local_APIC(void)
 {
-	int maxlvt = lapic_get_maxlvt();
+	int maxlvt;
 	u32 v;
 
 	/* APIC hasn't been mapped yet */
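The hunk above moves the lapic_get_maxlvt() read out of the declaration so it only happens after the "APIC hasn't been mapped yet" check that follows in clear_local_APIC(). A stand-alone sketch of that check-before-read pattern; apic_is_mapped() and read_max_lvt() are hypothetical stand-ins, not the kernel's APIC helpers:

#include <stdio.h>

/*
 * Sketch of the check-before-read pattern behind the "don't read maxlvt
 * before checking if APIC is mapped" fix.  apic_is_mapped() and
 * read_max_lvt() are made-up placeholders.
 */
static int apic_is_mapped(void)
{
	return 0;	/* pretend the APIC registers are not mapped yet */
}

static int read_max_lvt(void)
{
	return 6;	/* placeholder for reading the APIC version register */
}

static void clear_lvt_entries(void)
{
	int maxlvt;	/* no hardware access at declaration time */

	if (!apic_is_mapped())
		return;	/* bail out before touching any APIC register */

	maxlvt = read_max_lvt();	/* safe: the registers are mapped now */
	printf("masking %d LVT entries\n", maxlvt + 1);
}

int main(void)
{
	clear_lvt_entries();	/* safe even though the APIC is "not mapped" */
	return 0;
}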
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 4bc1be5d547..08a30986d47 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -53,7 +53,7 @@ static cycle_t kvm_clock_read(void);
  * have elapsed since the hypervisor wrote the data. So we try to account for
  * that with system time
  */
-unsigned long kvm_get_wallclock(void)
+static unsigned long kvm_get_wallclock(void)
 {
 	u32 wc_sec, wc_nsec;
 	u64 delta;
@@ -86,7 +86,7 @@ unsigned long kvm_get_wallclock(void)
 	return ts.tv_sec + 1;
 }
 
-int kvm_set_wallclock(unsigned long now)
+static int kvm_set_wallclock(unsigned long now)
 {
 	return 0;
 }
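The two kvmclock.c hunks are the namespacecheck part of the merge: kvm_get_wallclock() and kvm_set_wallclock() are only reached through a function-pointer table set up in the same file, so they can be file-local. A minimal user-space sketch of that pattern, using a made-up wallclock_ops struct rather than the kernel's paravirt interface:

#include <stdio.h>

/*
 * Sketch of the namespacecheck fix: functions referenced only through a
 * file-local ops table can be static.  struct wallclock_ops is invented
 * for illustration only.
 */
struct wallclock_ops {
	unsigned long (*get)(void);
	int (*set)(unsigned long now);
};

static unsigned long demo_get_wallclock(void)
{
	return 1211649600UL;	/* arbitrary fixed timestamp */
}

static int demo_set_wallclock(unsigned long now)
{
	(void)now;
	return 0;		/* a guest wall clock is typically read-only */
}

static const struct wallclock_ops demo_ops = {
	.get = demo_get_wallclock,
	.set = demo_set_wallclock,
};

int main(void)
{
	printf("wallclock: %lu\n", demo_ops.get());
	return demo_ops.set(0);
}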
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e4790728b22..068759db63d 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -14,7 +14,7 @@
 
 #include "mach_timer.h"
 
-static int tsc_enabled;
+static int tsc_disabled;
 
 /*
  * On some systems the TSC frequency does not
@@ -28,8 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
 	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-				"cannot disable TSC completely.\n");
-	mark_tsc_unstable("user disabled TSC");
+			"cannot disable TSC completely.\n");
+	tsc_disabled = 1;
 	return 1;
 }
 #else
@@ -120,7 +120,7 @@ unsigned long long native_sched_clock(void)
 	 *   very important for it to be as fast as the platform
 	 *   can achive it. )
 	 */
-	if (unlikely(!tsc_enabled && !tsc_unstable))
+	if (unlikely(tsc_disabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
@@ -322,7 +322,6 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		tsc_enabled = 0;
 		printk("Marking TSC unstable due to: %s.\n", reason);
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
@@ -336,7 +335,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
-		d->ident);
+	       d->ident);
 	tsc_unstable = 1;
 	return 0;
 }
@@ -403,14 +402,22 @@ void __init tsc_init(void)
 {
 	int cpu;
 
-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc || tsc_disabled) {
+		/* Disable the TSC in case of !cpu_has_tsc */
+		tsc_disabled = 1;
 		return;
+	}
 
 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;
 
 	if (!cpu_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
+		/*
+		 * We need to disable the TSC completely in this case
+		 * to prevent sched_clock() from using it.
+		 */
+		tsc_disabled = 1;
 		return;
 	}
@@ -441,8 +448,6 @@ void __init tsc_init(void)
 	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	} else
-		tsc_enabled = 1;
-
+	}
 	clocksource_register(&clocksource_tsc);
 }
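Taken together, the tsc_32.c hunks replace the old tsc_enabled flag with tsc_disabled: "notsc" on a CONFIG_X86_TSC kernel, a missing TSC, and a failed calibration all set the flag and send native_sched_clock() down the jiffies fallback, while an unstable TSC now only downgrades the clocksource rating. A simplified user-space sketch of that fallback path, where HZ, jiffies_64 and read_tsc_ns() are placeholders for the kernel's symbols:

#include <stdio.h>
#include <stdint.h>

/*
 * Simplified sketch of the sched_clock() fallback wired up by the
 * tsc_disabled flag.  HZ, INITIAL_JIFFIES, jiffies_64 and read_tsc_ns()
 * are stand-ins, not the real kernel symbols.
 */
#define HZ		250
#define INITIAL_JIFFIES	0ULL
#define NSEC_PER_SEC	1000000000ULL

static int tsc_disabled;	/* set by "notsc", !cpu_has_tsc or failed calibration */
static uint64_t jiffies_64 = 1000;

static uint64_t read_tsc_ns(void)
{
	return 123456789ULL;	/* placeholder for rdtsc + cyc2ns scaling */
}

static uint64_t demo_sched_clock(void)
{
	if (tsc_disabled)
		/* No locking; an occasionally stale jiffies value is acceptable. */
		return (jiffies_64 - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ);

	return read_tsc_ns();
}

int main(void)
{
	printf("tsc path:     %llu ns\n", (unsigned long long)demo_sched_clock());
	tsc_disabled = 1;	/* e.g. calibration failed */
	printf("jiffies path: %llu ns\n", (unsigned long long)demo_sched_clock());
	return 0;
}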
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index fcc16e58609..1784b8077a1 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -227,14 +227,14 @@ void __init tsc_calibrate(void)
 	/* hpet or pmtimer available ? */
 	if (!hpet && !pm1 && !pm2) {
 		printk(KERN_INFO "TSC calibrated against PIT\n");
-		return;
+		goto out;
 	}
 
 	/* Check, whether the sampling was disturbed by an SMI */
 	if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
 		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
 		       "using PIT calibration result\n");
-		return;
+		goto out;
 	}
 
 	tsc2 = (tsc2 - tsc1) * 1000000L;
@@ -255,6 +255,7 @@ void __init tsc_calibrate(void)
 	tsc_khz = tsc2 / tsc1;
+out:
 	for_each_possible_cpu(cpu)
 		set_cyc2ns_scale(tsc_khz, cpu);
 }