Diffstat (limited to 'arch')

 -rw-r--r--   arch/i386/kernel/nmi.c    |  8 ++++----
 -rw-r--r--   arch/x86_64/kernel/nmi.c  |  9 +++++----

 2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index f5bc7e1be80..a5e34d65596 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -195,6 +195,8 @@ static __cpuinit inline int nmi_known_cpu(void)
 	return 0;
 }
 
+static int endflag __initdata = 0;
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -202,7 +204,6 @@ static __cpuinit inline int nmi_known_cpu(void)
  */
 static __init void nmi_cpu_busy(void *data)
 {
-	volatile int *endflag = data;
 	local_irq_enable_in_hardirq();
 	/* Intentionally don't use cpu_relax here. This is
 	   to make sure that the performance counter really ticks,
@@ -210,14 +211,13 @@ static __init void nmi_cpu_busy(void *data)
 	   pause instruction. On a real HT machine this is fine because
 	   all other CPUs are busy with "useless" delay loops and don't
 	   care if they get somewhat less cycles. */
-	while (*endflag == 0)
-		barrier();
+	while (endflag == 0)
+		mb();
 }
 #endif
 
 static int __init check_nmi_watchdog(void)
 {
-	volatile int endflag = 0;
 	unsigned int *prev_nmi_count;
 	int cpu;
 
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 27e95e7922c..186aebbae32 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -193,6 +193,8 @@ void nmi_watchdog_default(void)
 		nmi_watchdog = NMI_IO_APIC;
 }
 
+static int endflag __initdata = 0;
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -200,7 +202,6 @@ void nmi_watchdog_default(void)
  */
 static __init void nmi_cpu_busy(void *data)
 {
-	volatile int *endflag = data;
 	local_irq_enable_in_hardirq();
 	/* Intentionally don't use cpu_relax here. This is
 	   to make sure that the performance counter really ticks,
@@ -208,14 +209,13 @@ static __init void nmi_cpu_busy(void *data)
 	   pause instruction. On a real HT machine this is fine because
 	   all other CPUs are busy with "useless" delay loops and don't
 	   care if they get somewhat less cycles. */
-	while (*endflag == 0)
-		barrier();
+	while (endflag == 0)
+		mb();
 }
 #endif
 
 int __init check_nmi_watchdog (void)
 {
-	volatile int endflag = 0;
 	int *counts;
 	int cpu;
 
@@ -256,6 +256,7 @@ int __init check_nmi_watchdog (void)
 	if (!atomic_read(&nmi_active)) {
 		kfree(counts);
 		atomic_set(&nmi_active, -1);
+		endflag = 1;
 		return -1;
 	}
 	endflag = 1;
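
The shape both files converge on: endflag becomes a single file-scope __initdata flag, nmi_cpu_busy() polls it directly through mb() instead of dereferencing a volatile pointer through barrier(), and check_nmi_watchdog() sets it on every exit path (the last x86_64 hunk adds the store that was missing before the early return -1). Because mb() is a full memory barrier and therefore also a compiler barrier, the flag is re-read on each loop iteration without needing volatile. The sketch below is only a user-space analogue of that release pattern, under stated assumptions: pthreads stand in for the cross-CPU busy loops, GCC's __sync_synchronize() built-in stands in for mb(), and the thread count and comments are invented for illustration, none of it kernel API.

/* build: cc -O2 -pthread endflag_demo.c -o endflag_demo */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int endflag = 0;		/* deliberately not volatile, as in the patch */

/* Stand-in for nmi_cpu_busy(): keep a "CPU" busy until released. */
static void *cpu_busy(void *unused)
{
	(void)unused;
	/* The full barrier also acts as a compiler barrier, so endflag
	 * is re-read on every iteration even without volatile. */
	while (endflag == 0)
		__sync_synchronize();
	return NULL;
}

int main(void)
{
	pthread_t workers[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&workers[i], NULL, cpu_busy, NULL);

	/* Stand-in for check_nmi_watchdog(): do the check, then release
	 * the spinners on every exit path -- the second x86_64 hunk adds
	 * exactly such an "endflag = 1" before the early return -1. */
	sleep(1);
	endflag = 1;
	__sync_synchronize();

	for (i = 0; i < 4; i++)
		pthread_join(workers[i], NULL);

	printf("all busy loops released\n");
	return 0;
}

Strictly speaking, the plain concurrent load and store of endflag is a data race by the letter of the C standard; the kernel-style argument, echoed in the patch's own comment, is that the barrier forces the re-read and the ordering that this init-time handshake actually needs.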