Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--	arch/i386/kernel/alternative.c		21
-rw-r--r--	arch/i386/kernel/apic.c			32
-rw-r--r--	arch/i386/kernel/cpu/amd.c		34
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/longhaul.c	2
-rw-r--r--	arch/i386/kernel/hpet.c			70
-rw-r--r--	arch/i386/kernel/io_apic.c		2
-rw-r--r--	arch/i386/kernel/microcode.c		71
-rw-r--r--	arch/i386/kernel/nmi.c			138
-rw-r--r--	arch/i386/kernel/vmi.c			32
-rw-r--r--	arch/i386/kernel/vmlinux.lds.S		2
10 files changed, 308 insertions, 96 deletions
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 9eca21b49f6..426f59b0106 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -5,15 +5,9 @@
 #include <asm/alternative.h>
 #include <asm/sections.h>
 
-static int no_replacement = 0;
 static int smp_alt_once = 0;
 static int debug_alternative = 0;
 
-static int __init noreplacement_setup(char *s)
-{
-	no_replacement = 1;
-	return 1;
-}
 static int __init bootonly(char *str)
 {
 	smp_alt_once = 1;
@@ -25,7 +19,6 @@ static int __init debug_alt(char *str)
 	return 1;
 }
 
-__setup("noreplacement", noreplacement_setup);
 __setup("smp-alt-boot", bootonly);
 __setup("debug-alternative", debug_alt);
 
@@ -252,9 +245,6 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 	struct smp_alt_module *smp;
 	unsigned long flags;
 
-	if (no_replacement)
-		return;
-
 	if (smp_alt_once) {
 		if (boot_cpu_has(X86_FEATURE_UP))
 			alternatives_smp_unlock(locks, locks_end,
@@ -289,7 +279,7 @@ void alternatives_smp_module_del(struct module *mod)
 	struct smp_alt_module *item;
 	unsigned long flags;
 
-	if (no_replacement || smp_alt_once)
+	if (smp_alt_once)
 		return;
 
 	spin_lock_irqsave(&smp_alt, flags);
@@ -320,7 +310,7 @@ void alternatives_smp_switch(int smp)
 		return;
 #endif
 
-	if (no_replacement || smp_alt_once)
+	if (smp_alt_once)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
 
@@ -386,13 +376,6 @@ extern struct paravirt_patch __start_parainstructions[],
 void __init alternative_instructions(void)
 {
 	unsigned long flags;
-	if (no_replacement) {
-		printk(KERN_INFO "(SMP-)alternatives turned off\n");
-		free_init_pages("SMP alternatives",
-				(unsigned long)__smp_alt_begin,
-				(unsigned long)__smp_alt_end);
-		return;
-	}
 
 	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index e88415282a6..93aa911646a 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -272,32 +272,6 @@ static void __devinit setup_APIC_timer(void)
 }
 
 /*
- * Detect systems with known broken BIOS implementations
- */
-static int __init lapic_check_broken_bios(struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE "%s detected: disabling lapic timer.\n",
-	       d->ident);
-	local_apic_timer_disabled = 1;
-	return 0;
-}
-
-static struct dmi_system_id __initdata broken_bios_dmi_table[] = {
-	{
-		/*
-		 * BIOS exports only C1 state, but uses deeper power
-		 * modes behind the kernels back.
-		 */
-		.callback = lapic_check_broken_bios,
-		.ident = "HP nx6325",
-		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
-		},
-	},
-	{}
-};
-
-/*
  * In this functions we calibrate APIC bus clocks to the external timer.
  *
  * We want to do the calibration only once since we want to have local timer
@@ -372,12 +346,12 @@ void __init setup_boot_APIC_clock(void)
 	long delta, deltapm;
 	int pm_referenced = 0;
 
-	/* Detect know broken systems */
-	dmi_check_system(broken_bios_dmi_table);
+	if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
+		local_apic_timer_disabled = 1;
 
 	/*
 	 * The local apic timer can be disabled via the kernel
-	 * commandline or from the dmi quirk above. Register the lapic
+	 * commandline or from the test above. Register the lapic
 	 * timer as a dummy clock event source on SMP systems, so the
 	 * broadcast mechanism is used. On UP systems simply ignore it.
 	 */
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 41cfea57232..2d47db48297 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -22,6 +22,37 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
+#define ENABLE_C1E_MASK		0x18000000
+#define CPUID_PROCESSOR_SIGNATURE	1
+#define CPUID_XFAM		0x0ff00000
+#define CPUID_XFAM_K8		0x00000000
+#define CPUID_XFAM_10H		0x00100000
+#define CPUID_XFAM_11H		0x00200000
+#define CPUID_XMOD		0x000f0000
+#define CPUID_XMOD_REV_F	0x00040000
+
+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+	u32 lo, hi;
+	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+	switch (eax & CPUID_XFAM) {
+	case CPUID_XFAM_K8:
+		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+			break;
+	case CPUID_XFAM_10H:
+	case CPUID_XFAM_11H:
+		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+		if (lo & ENABLE_C1E_MASK)
+			return 1;
+		break;
+	default:
+		/* err on the side of caution */
+		return 1;
+	}
+	return 0;
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -241,6 +272,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	if (cpuid_eax(0x80000000) >= 0x80000006)
 		num_cache_leaves = 3;
+
+	if (amd_apic_timer_broken())
+		set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
 }
 
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index a1f1b715bcf..2b030d6ccbf 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -758,7 +758,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 					NULL, (void *)&pr);
 
 	/* Check ACPI support for C3 state */
-	if (pr != NULL && longhaul_version != TYPE_LONGHAUL_V1) {
+	if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
 		cx = &pr->power.states[ACPI_STATE_C3];
 		if (cx->address > 0 && cx->latency <= 1000) {
 			longhaul_flags |= USE_ACPI_C3;
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index f3ab61ee749..17d73459fc5 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -3,6 +3,8 @@
 #include <linux/errno.h>
 #include <linux/hpet.h>
 #include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/pm.h>
 #include <asm/hpet.h>
 #include <asm/io.h>
 
@@ -197,7 +199,7 @@ static int hpet_next_event(unsigned long delta,
 	cnt += delta;
 	hpet_writel(cnt, HPET_T0_CMP);
 
-	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
+	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
 }
 
 /*
@@ -307,6 +309,7 @@ int __init hpet_enable(void)
 out_nohpet:
 	iounmap(hpet_virt_address);
 	hpet_virt_address = NULL;
+	boot_hpet_disable = 1;
 	return 0;
 }
 
@@ -521,3 +524,68 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 #endif
+
+
+/*
+ * Suspend/resume part
+ */
+
+#ifdef CONFIG_PM
+
+static int hpet_suspend(struct sys_device *sys_device, pm_message_t state)
+{
+	unsigned long cfg = hpet_readl(HPET_CFG);
+
+	cfg &= ~(HPET_CFG_ENABLE|HPET_CFG_LEGACY);
+	hpet_writel(cfg, HPET_CFG);
+
+	return 0;
+}
+
+static int hpet_resume(struct sys_device *sys_device)
+{
+	unsigned int id;
+
+	hpet_start_counter();
+
+	id = hpet_readl(HPET_ID);
+
+	if (id & HPET_ID_LEGSUP)
+		hpet_enable_int();
+
+	return 0;
+}
+
+static struct sysdev_class hpet_class = {
+	set_kset_name("hpet"),
+	.suspend	= hpet_suspend,
+	.resume		= hpet_resume,
+};
+
+static struct sys_device hpet_device = {
+	.id		= 0,
+	.cls		= &hpet_class,
+};
+
+
+static __init int hpet_register_sysfs(void)
+{
+	int err;
+
+	if (!is_hpet_capable())
+		return 0;
+
+	err = sysdev_class_register(&hpet_class);
+
+	if (!err) {
+		err = sysdev_register(&hpet_device);
+		if (err)
+			sysdev_class_unregister(&hpet_class);
+	}
+
+	return err;
+}
+
+device_initcall(hpet_register_sysfs);
+
+#endif
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index e4408ff4e67..b3ab8ffebd2 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -736,7 +736,7 @@ failed:
 	return 0;
 }
 
-int __init irqbalance_disable(char *str)
+int __devinit irqbalance_disable(char *str)
 {
 	irqbalance_disabled = 1;
 	return 1;
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index b8f16633a6e..cbe7ec8dbb9 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -567,6 +567,53 @@ static int cpu_request_microcode(int cpu)
 	return error;
 }
 
+static int apply_microcode_on_cpu(int cpu)
+{
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	cpumask_t old;
+	unsigned int val[2];
+	int err = 0;
+
+	if (!uci->mc)
+		return -EINVAL;
+
+	old = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+	/* Check if the microcode we have in memory matches the CPU */
+	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+	    cpu_has(c, X86_FEATURE_IA64) || uci->sig != cpuid_eax(0x00000001))
+		err = -EINVAL;
+
+	if (!err && ((c->x86_model >= 5) || (c->x86 > 6))) {
+		/* get processor flags from MSR 0x17 */
+		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+		if (uci->pf != (1 << ((val[1] >> 18) & 7)))
+			err = -EINVAL;
+	}
+
+	if (!err) {
+		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+		/* see notes above for revision 1.07.  Apparent chip bug */
+		sync_core();
+		/* get the current revision from MSR 0x8B */
+		rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+		if (uci->rev != val[1])
+			err = -EINVAL;
+	}
+
+	if (!err)
+		apply_microcode(cpu);
+	else
+		printk(KERN_ERR "microcode: Could not apply microcode to CPU%d:"
+		       " sig=0x%x, pf=0x%x, rev=0x%x\n",
+		       cpu, uci->sig, uci->pf, uci->rev);
+
+	set_cpus_allowed(current, old);
+	return err;
+}
+
 static void microcode_init_cpu(int cpu)
 {
 	cpumask_t old;
@@ -577,7 +624,8 @@ static void microcode_init_cpu(int cpu)
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
-	if (uci->valid && system_state == SYSTEM_RUNNING)
+	if (uci->valid && system_state == SYSTEM_RUNNING &&
+	    !suspend_cpu_hotplug)
 		cpu_request_microcode(cpu);
 	mutex_unlock(&microcode_mutex);
 	set_cpus_allowed(current, old);
@@ -663,13 +711,24 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return 0;
 
 	pr_debug("Microcode:CPU %d added\n", cpu);
-	memset(uci, 0, sizeof(*uci));
+	/* If suspend_cpu_hotplug is set, the system is resuming and we should
+	 * use the data from before the suspend.
+	 */
+	if (suspend_cpu_hotplug) {
+		err = apply_microcode_on_cpu(cpu);
+		if (err)
+			microcode_fini_cpu(cpu);
+	}
+	if (!uci->valid)
+		memset(uci, 0, sizeof(*uci));
 
 	err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
 	if (err)
 		return err;
 
-	microcode_init_cpu(cpu);
+	if (!uci->valid)
+		microcode_init_cpu(cpu);
+
 	return 0;
 }
 
@@ -680,7 +739,11 @@ static int mc_sysdev_remove(struct sys_device *sys_dev)
 	if (!cpu_online(cpu))
 		return 0;
 	pr_debug("Microcode:CPU %d removed\n", cpu);
-	microcode_fini_cpu(cpu);
+	/* If suspend_cpu_hotplug is set, the system is suspending and we should
+	 * keep the microcode in memory for the resume.
+	 */
+	if (!suspend_cpu_hotplug)
+		microcode_fini_cpu(cpu);
 	sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 	return 0;
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 14702427b10..84c3497efb6 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -41,16 +41,17 @@ int nmi_watchdog_enabled;
  * different subsystems this reservation system just tries to coordinate
  * things a little
  */
-static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
-static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
  */
 #define NMI_MAX_COUNTER_BITS 66
+#define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS)
+
+static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
@@ -122,64 +123,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]);
}
 
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_perfctr_nmi(cpu, msr);
+	}
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -263,7 +329,7 @@ static int __init check_nmi_watchdog(void)
 	for_each_possible_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
-	mdelay((10*1000)/nmi_hz); // wait 10 ticks
+	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_possible_cpu(cpu) {
 #ifdef CONFIG_SMP
@@ -507,10 +573,10 @@ static int setup_k7_watchdog(void)
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -533,7 +599,7 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -544,8 +610,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define P6_EVNTSEL0_ENABLE	(1 << 22)
@@ -563,10 +629,10 @@ static int setup_p6_watchdog(void)
 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -590,7 +656,7 @@ static int setup_p6_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -601,8 +667,8 @@ static void stop_p6_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -668,10 +734,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -695,7 +761,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -707,8 +773,8 @@ static void stop_p4_watchdog(void)
 
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -736,10 +802,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -764,7 +830,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -787,8 +853,8 @@ static void stop_intel_arch_watchdog(void)
 		return;
 
 	wrmsr(wd->evntsel_msr, 0, 0);
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog (void *unused)
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index fb07a1aad22..697a70e8c0c 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -69,6 +69,7 @@ struct {
 	void (*flush_tlb)(int);
 	void (*set_initial_ap_state)(int, int);
 	void (*halt)(void);
+	void (*set_lazy_mode)(int mode);
 } vmi_ops;
 
 /* XXX move this to alternative.h */
@@ -574,6 +575,26 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
+static void vmi_set_lazy_mode(int mode)
+{
+	static DEFINE_PER_CPU(int, lazy_mode);
+
+	if (!vmi_ops.set_lazy_mode)
+		return;
+
+	/* Modes should never nest or overlap */
+	BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
+					     mode == PARAVIRT_LAZY_FLUSH));
+
+	if (mode == PARAVIRT_LAZY_FLUSH) {
+		vmi_ops.set_lazy_mode(0);
+		vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
+	} else {
+		vmi_ops.set_lazy_mode(mode);
+		__get_cpu_var(lazy_mode) = mode;
+	}
+}
+
 static inline int __init check_vmi_rom(struct vrom_header *rom)
 {
 	struct pci_header *pci;
@@ -691,11 +712,14 @@ static void *vmi_get_function(int vmicall)
 do {								\
 	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
 				    VMI_CALL_##vmicall);	\
-	if (rel->type != VMI_RELOCATION_NONE) {			\
-		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);	\
+	if (rel->type == VMI_RELOCATION_CALL_REL)		\
 		paravirt_ops.opname = (void *)rel->eip;		\
-	} else if (rel->type == VMI_RELOCATION_NOP)		\
+	else if (rel->type == VMI_RELOCATION_NOP)		\
 		paravirt_ops.opname = (void *)vmi_nop;		\
+	else if (rel->type != VMI_RELOCATION_NONE)		\
+		printk(KERN_WARNING "VMI: Unknown relocation "	\
+				    "type %d for " #vmicall"\n",\
+					rel->type);		\
 } while (0)
 
 /*
@@ -804,7 +828,7 @@ static inline int __init activate_vmi(void)
 	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
 	para_fill(set_iopl_mask, SetIOPLMask);
 	para_fill(io_delay, IODelay);
-	para_fill(set_lazy_mode, SetLazyMode);
+	para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
 	para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index ca51610955d..6f38f818380 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -26,7 +26,7 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
 OUTPUT_ARCH(i386)
 ENTRY(phys_startup_32)
 jiffies = jiffies_64;
-_proxy_pda = 0;
+_proxy_pda = 1;
 
 PHDRS {
	text PT_LOAD FLAGS(5);	/* R_E */