Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic.c                      |  12
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  |  20
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |  13
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       |  15
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c          |  12
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c             |   3
-rw-r--r--  arch/x86/kernel/ds.c                        |  31
-rw-r--r--  arch/x86/kernel/entry_32.S                  |   2
-rw-r--r--  arch/x86/kernel/entry_64.S                  |   1
-rw-r--r--  arch/x86/kernel/hpet.c                      |  15
-rw-r--r--  arch/x86/kernel/io_apic.c                   |   5
-rw-r--r--  arch/x86/kernel/irqinit_32.c                |  12
-rw-r--r--  arch/x86/kernel/kprobes.c                   |   2
-rw-r--r--  arch/x86/kernel/mpparse.c                   |   1
-rw-r--r--  arch/x86/kernel/pci-gart_64.c               |   2
-rw-r--r--  arch/x86/kernel/setup_percpu.c              |   2
-rw-r--r--  arch/x86/kernel/signal.c                    |  11
-rw-r--r--  arch/x86/kernel/syscall_table_32.S          |   2
-rw-r--r--  arch/x86/kernel/tlb_uv.c                    |   1
-rw-r--r--  arch/x86/kernel/vmi_32.c                    |   2
20 files changed, 101 insertions, 63 deletions
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 566a08466b1..115449f869e 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -47,6 +47,7 @@
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/i8259.h>
+#include <asm/smp.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
@@ -894,6 +895,10 @@ void disable_local_APIC(void)
{
unsigned int value;
+ /* APIC hasn't been mapped yet */
+ if (!apic_phys)
+ return;
+
clear_local_APIC();
/*
@@ -1431,7 +1436,7 @@ static int __init detect_init_APIC(void)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
- (boot_cpu_data.x86 == 15))
+ (boot_cpu_data.x86 >= 15))
break;
goto no_apic;
case X86_VENDOR_INTEL:
@@ -1832,6 +1837,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
num_processors++;
cpu = cpumask_next_zero(-1, cpu_present_mask);
+ if (version != apic_version[boot_cpu_physical_apicid])
+ WARN_ONCE(1,
+ "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
+ apic_version[boot_cpu_physical_apicid], cpu, version);
+
physid_set(apicid, phys_cpu_present_map);
if (apicid == boot_cpu_physical_apicid) {
/*
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 06fcd8f9323..4b1c319d30c 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,7 +145,7 @@ typedef union {
struct drv_cmd {
unsigned int type;
- cpumask_var_t mask;
+ const struct cpumask *mask;
drv_addr_union addr;
u32 val;
};
@@ -231,15 +231,9 @@ static u32 get_cur_val(const struct cpumask *mask)
return 0;
}
- if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
- return 0;
-
- cpumask_copy(cmd.mask, mask);
-
+ cmd.mask = mask;
drv_read(&cmd);
- free_cpumask_var(cmd.mask);
-
dprintk("get_cur_val = %u\n", cmd.val);
return cmd.val;
@@ -369,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
return freq;
}
-static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
+static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
struct acpi_cpufreq_data *data)
{
unsigned int cur_freq;
@@ -404,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
return -ENODEV;
}
- if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
- return -ENOMEM;
-
perf = data->acpi_data;
result = cpufreq_frequency_table_target(policy,
data->freq_table,
@@ -451,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
/* cpufreq holds the hotplug lock, so we are safe from here on */
if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
- cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
+ cmd.mask = policy->cpus;
else
- cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
+ cmd.mask = cpumask_of(policy->cpu);
freqs.old = perf->states[perf->state].core_frequency * 1000;
freqs.new = data->freq_table[next_state].frequency;
@@ -480,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
perf->state = next_perf_state;
out:
- free_cpumask_var(cmd.mask);
return result;
}
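
The acpi-cpufreq hunks above drop the per-call alloc_cpumask_var()/cpumask_copy()/free_cpumask_var() sequence and let struct drv_cmd borrow the caller's mask through a const pointer, which removes an allocation failure path from the frequency-switch path. Below is a rough userspace analogue of that borrow-instead-of-copy pattern; it uses POSIX cpu_set_t as a stand-in for the kernel's struct cpumask and is an illustration only, not the kernel API.

/* Illustrative only: a command struct that borrows a CPU mask by const
 * pointer (as the patched drv_cmd does) instead of allocating and copying
 * its own.  cpu_set_t stands in for the kernel's struct cpumask. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

struct demo_cmd {
	const cpu_set_t *mask;	/* borrowed; the caller retains ownership */
	unsigned int val;
};

static void demo_read(struct demo_cmd *cmd)
{
	/* Pretend to issue the command on every CPU in the borrowed mask. */
	cmd->val = CPU_COUNT(cmd->mask);
}

int main(void)
{
	cpu_set_t online;

	CPU_ZERO(&online);
	CPU_SET(0, &online);
	CPU_SET(1, &online);

	/* No allocation, no copy, no error path, nothing to free. */
	struct demo_cmd cmd = { .mask = &online };
	demo_read(&cmd);
	printf("command targeted %u CPUs\n", cmd.val);
	return 0;
}
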
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8ea6929e974..430e5c38a54 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,6 +29,19 @@
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
+ /* Unmask CPUID levels if masked: */
+ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
+ u64 misc_enable;
+
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+ if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+ misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+ wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+ c->cpuid_level = cpuid_eax(0);
+ }
+ }
+
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
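
The early_init_intel() hunk above clears MSR_IA32_MISC_ENABLE_LIMIT_CPUID so the full range of CPUID levels becomes visible again. As a rough illustration of what that bit controls, here is a hedged userspace sketch that reads IA32_MISC_ENABLE (MSR 0x1a0) through the Linux msr driver and reports whether bit 22, the CPUID-limit flag, is set; the /dev/cpu/0/msr path, root privileges, and a loaded msr module are assumptions of the sketch, not part of the patch.

/* Hypothetical userspace check, not part of the patch: read IA32_MISC_ENABLE
 * (MSR 0x1a0) via /dev/cpu/0/msr and test bit 22, the "limit CPUID maxval"
 * flag that the early_init_intel() change above clears from the kernel side.
 * Requires root and the msr module (modprobe msr). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define IA32_MISC_ENABLE		0x1a0
#define MISC_ENABLE_LIMIT_CPUID		(1ULL << 22)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr driver maps the file offset to the MSR address. */
	if (pread(fd, &val, sizeof(val), IA32_MISC_ENABLE) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("IA32_MISC_ENABLE = %#llx, CPUID levels %s\n",
	       (unsigned long long)val,
	       (val & MISC_ENABLE_LIMIT_CPUID) ? "limited" : "not limited");
	close(fd);
	return 0;
}
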
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 48533d77be7..da299eb85fc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
{
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
+ { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
{ 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
{ 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
+ { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
+ { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
{ 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
{ 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
{ 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
{ 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
{ 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
{ 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
+ { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
+ { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */
+ { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */
+ { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */
+ { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */
+ { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
+ { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */
+ { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
+ { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */
+ { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */
+ { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
+ { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
{ 0x00, 0, 0}
};
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b59ddcc88cd..0c0a455fe95 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -33,11 +33,13 @@ u64 mtrr_tom2;
struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
+static int __initdata mtrr_show;
+static int __init mtrr_debug(char *opt)
+{
+ mtrr_show = 1;
+ return 0;
+}
+early_param("mtrr.show", mtrr_debug);
/*
* Returns the effective MTRR type for the region
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index d259e5d2e05..236a401b825 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
/* kvm/qemu doesn't have mtrr set right, don't trim them all */
if (!highest_pfn) {
- WARN(!kvm_para_available(), KERN_WARNING
- "WARNING: strange, CPU MTRRs all blank?\n");
+ printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
return 0;
}
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index da91701a234..169a120587b 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -15,8 +15,8 @@
* - buffer allocation (memory accounting)
*
*
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
*/
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
}
static const struct ds_configuration ds_cfg_netburst = {
- .name = "netburst",
+ .name = "Netburst",
.ctl[dsf_bts] = (1 << 2) | (1 << 3),
.ctl[dsf_bts_kernel] = (1 << 5),
.ctl[dsf_bts_user] = (1 << 6),
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = {
#endif
};
static const struct ds_configuration ds_cfg_pentium_m = {
- .name = "pentium m",
+ .name = "Pentium M",
.ctl[dsf_bts] = (1 << 6) | (1 << 7),
.sizeof_field = sizeof(long),
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = {
.sizeof_rec[ds_pebs] = sizeof(long) * 18,
#endif
};
-static const struct ds_configuration ds_cfg_core2 = {
- .name = "core 2",
+static const struct ds_configuration ds_cfg_core2_atom = {
+ .name = "Core 2/Atom",
.ctl[dsf_bts] = (1 << 6) | (1 << 7),
.ctl[dsf_bts_kernel] = (1 << 9),
.ctl[dsf_bts_user] = (1 << 10),
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
switch (c->x86) {
case 0x6:
switch (c->x86_model) {
- case 0 ... 0xC:
- /* sorry, don't know about them */
- break;
- case 0xD:
- case 0xE: /* Pentium M */
+ case 0x9:
+ case 0xd: /* Pentium M */
ds_configure(&ds_cfg_pentium_m);
break;
- default: /* Core2, Atom, ... */
- ds_configure(&ds_cfg_core2);
+ case 0xf:
+ case 0x17: /* Core2 */
+ case 0x1c: /* Atom */
+ ds_configure(&ds_cfg_core2_atom);
+ break;
+ case 0x1a: /* i7 */
+ default:
+ /* sorry, don't know about them */
break;
}
break;
- case 0xF:
+ case 0xf:
switch (c->x86_model) {
case 0x0:
case 0x1:
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index d6f0490a739..46469029e9d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1203,7 +1203,6 @@ nmi_stack_correct:
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
- TRACE_IRQS_OFF
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
@@ -1244,7 +1243,6 @@ nmi_espfix_stack:
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
- TRACE_IRQS_OFF
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e28c7a98779..a1346217e43 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -346,6 +346,7 @@ ENTRY(save_args)
popq_cfi %rax /* move return address... */
mov %gs:pda_irqstackptr,%rsp
EMPTY_FRAME 0
+ pushq_cfi %rbp /* backlink for unwinder */
pushq_cfi %rax /* ... to the new stack */
/*
* We entered an interrupt context - irqs are off:
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cd759ad9069..388254f69a2 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -628,11 +628,12 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
switch (action & 0xf) {
case CPU_ONLINE:
- INIT_DELAYED_WORK(&work.work, hpet_work);
+ INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
init_completion(&work.complete);
/* FIXME: add schedule_work_on() */
schedule_delayed_work_on(cpu, &work.work, 0);
wait_for_completion(&work.complete);
+ destroy_timer_on_stack(&work.work.timer);
break;
case CPU_DEAD:
if (hdev) {
@@ -896,7 +897,7 @@ static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
-static unsigned long hpet_t1_cmp;
+static u32 hpet_t1_cmp;
static unsigned long hpet_default_delta;
static unsigned long hpet_pie_delta;
static unsigned long hpet_pie_limit;
@@ -904,6 +905,14 @@ static unsigned long hpet_pie_limit;
static rtc_irq_handler irq_handler;
/*
+ * Check that the hpet counter c1 is ahead of the c2
+ */
+static inline int hpet_cnt_ahead(u32 c1, u32 c2)
+{
+ return (s32)(c2 - c1) < 0;
+}
+
+/*
* Registers a IRQ handler.
*/
int hpet_register_irq_handler(rtc_irq_handler handler)
@@ -1074,7 +1083,7 @@ static void hpet_rtc_timer_reinit(void)
hpet_t1_cmp += delta;
hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
lost_ints++;
- } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+ } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
if (lost_ints) {
if (hpet_rtc_flags & RTC_PIE)
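
The hpet.c hunks above narrow hpet_t1_cmp to u32 and replace the open-coded comparison with the new hpet_cnt_ahead() helper, which decides whether one 32-bit HPET counter value is ahead of another by the sign of their difference, so the RTC-emulation catch-up loop stays correct when the counter wraps. A small standalone sketch of the same wrap-safe comparison follows; the names are illustrative, not kernel code.

/* Standalone demonstration of the wraparound-safe "is c1 ahead of c2?"
 * comparison used by the hpet_cnt_ahead() helper added above.  Treating the
 * 32-bit difference as signed keeps the answer correct across the point
 * where a free-running 32-bit counter overflows. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int cnt_ahead(uint32_t c1, uint32_t c2)
{
	return (int32_t)(c2 - c1) < 0;
}

int main(void)
{
	/* Plain case: 2000 is ahead of 1000. */
	assert(cnt_ahead(2000, 1000));

	/* Wraparound: the counter ticked from 0xfffffff0 past zero to 0x10.
	 * A naive "c1 > c2" comparison gets this wrong. */
	assert(cnt_ahead(0x00000010u, 0xfffffff0u));
	assert(!(0x00000010u > 0xfffffff0u));

	printf("wrap-safe comparison behaves as expected\n");
	return 0;
}
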
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1c4a1302536..9b0c480c383 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp)
vector = ~get_irq_regs()->orig_ax;
me = smp_processor_id();
+
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
*descp = desc = move_irq_desc(desc, me);
/* get the new one */
cfg = desc->chip_data;
#endif
-
- if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
send_cleanup_vector(cfg);
+ }
}
#else
static inline void irq_complete_move(struct irq_desc **descp) {}
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 1507ad4e674..10a09c2f182 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
}
}
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
- .handler = no_action,
- .mask = CPU_MASK_NONE,
- .name = "cascade",
-};
-
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... IRQ0_VECTOR - 1] = -1,
[IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
- if (!acpi_ioapic)
- setup_irq(2, &irq2);
-
/* setup after call gates are initialised (usually add in
* the architecture specific gates)
*/
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 884d985b8b8..e948b28a5a9 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -446,7 +446,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
if (p->ainsn.boostable == 1 && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index c0601c2848a..a649a4ccad4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -27,6 +27,7 @@
#include <asm/e820.h>
#include <asm/trampoline.h>
#include <asm/setup.h>
+#include <asm/smp.h>
#include <mach_apic.h>
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 00c2bcd4146..d5768b1af08 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -5,7 +5,7 @@
* This allows to use PCI devices that only support 32bit addresses on systems
* with more than 4GB.
*
- * See Documentation/DMA-mapping.txt for the interface specification.
+ * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU General Public License v2 only.
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 55c46074eba..01161077a49 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void)
#ifdef CONFIG_X86_64
/* correctly size the local cpu masks */
-static void setup_cpu_local_masks(void)
+static void __init setup_cpu_local_masks(void)
{
alloc_bootmem_cpumask_var(&cpu_initialized_mask);
alloc_bootmem_cpumask_var(&cpu_callin_mask);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 89bb7668041..df0587f24c5 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -632,9 +632,16 @@ badframe:
}
#ifdef CONFIG_X86_32
-asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
+/*
+ * Note: do not pass in pt_regs directly as with tail-call optimization
+ * GCC will incorrectly stomp on the caller's frame and corrupt user-space
+ * register state:
+ */
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
{
- return do_rt_sigreturn(&regs);
+ struct pt_regs *regs = (struct pt_regs *)&__unused;
+
+ return do_rt_sigreturn(regs);
}
#else /* !CONFIG_X86_32 */
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d44395ff34c..e2e86a08f31 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -88,7 +88,7 @@ ENTRY(sys_call_table)
.long sys_uselib
.long sys_swapon
.long sys_reboot
- .long old_readdir
+ .long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f885023167e..6812b829ed8 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -200,6 +200,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
destination_timeouts = 0;
}
}
+ cpu_relax();
}
return FLUSH_COMPLETE;
}
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 23206ba1687..1d3302cc2dd 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -858,7 +858,7 @@ void __init vmi_init(void)
#endif
}
-void vmi_activate(void)
+void __init vmi_activate(void)
{
unsigned long flags;