Diffstat (limited to 'arch/i386/kernel')
-rw-r--r-- | arch/i386/kernel/alternative.c              | 37
-rw-r--r-- | arch/i386/kernel/apic.c                     | 12
-rw-r--r-- | arch/i386/kernel/cpu/amd.c                  |  7
-rw-r--r-- | arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 41
-rw-r--r-- | arch/i386/kernel/doublefault.c              | 13
-rw-r--r-- | arch/i386/kernel/head.S                     |  4
-rw-r--r-- | arch/i386/kernel/io_apic.c                  |  7
-rw-r--r-- | arch/i386/kernel/nmi.c                      |  4
-rw-r--r-- | arch/i386/kernel/paravirt.c                 | 52
-rw-r--r-- | arch/i386/kernel/vmi.c                      | 35
10 files changed, 111 insertions, 101 deletions
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index c85598acb8f..1b66d5c70ea 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -11,6 +11,8 @@
 #include <asm/mce.h>
 #include <asm/nmi.h>
 
+#define MAX_PATCH_LEN (255-1)
+
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_alt_once;
 
@@ -148,7 +150,8 @@ static unsigned char** find_nop_table(void)
 
 #endif /* CONFIG_X86_64 */
 
-static void nop_out(void *insns, unsigned int len)
+/* Use this to add nops to a buffer, then text_poke the whole buffer. */
+static void add_nops(void *insns, unsigned int len)
 {
         unsigned char **noptable = find_nop_table();
 
@@ -156,7 +159,7 @@ static void nop_out(void *insns, unsigned int len)
                 unsigned int noplen = len;
                 if (noplen > ASM_NOP_MAX)
                         noplen = ASM_NOP_MAX;
-                text_poke(insns, noptable[noplen], noplen);
+                memcpy(insns, noptable[noplen], noplen);
                 insns += noplen;
                 len -= noplen;
         }
@@ -174,15 +177,15 @@ extern u8 *__smp_locks[], *__smp_locks_end[];
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
         struct alt_instr *a;
-        u8 *instr;
-        int diff;
+        char insnbuf[MAX_PATCH_LEN];
 
         DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
         for (a = start; a < end; a++) {
+                u8 *instr = a->instr;
                 BUG_ON(a->replacementlen > a->instrlen);
+                BUG_ON(a->instrlen > sizeof(insnbuf));
                 if (!boot_cpu_has(a->cpuid))
                         continue;
-                instr = a->instr;
 #ifdef CONFIG_X86_64
                 /* vsyscall code is not mapped yet. resolve it manually. */
                 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
@@ -191,9 +194,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
                                 __FUNCTION__, a->instr, instr);
                 }
 #endif
-                memcpy(instr, a->replacement, a->replacementlen);
-                diff = a->instrlen - a->replacementlen;
-                nop_out(instr + a->replacementlen, diff);
+                memcpy(insnbuf, a->replacement, a->replacementlen);
+                add_nops(insnbuf + a->replacementlen,
+                         a->instrlen - a->replacementlen);
+                text_poke(instr, insnbuf, a->instrlen);
         }
 }
 
@@ -215,16 +219,18 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
         u8 **ptr;
+        char insn[1];
 
         if (noreplace_smp)
                 return;
 
+        add_nops(insn, 1);
         for (ptr = start; ptr < end; ptr++) {
                 if (*ptr < text)
                         continue;
                 if (*ptr > text_end)
                         continue;
-                nop_out(*ptr, 1);
+                text_poke(*ptr, insn, 1);
         };
 }
 
@@ -351,6 +357,7 @@ void apply_paravirt(struct paravirt_patch_site *start,
                     struct paravirt_patch_site *end)
 {
         struct paravirt_patch_site *p;
+        char insnbuf[MAX_PATCH_LEN];
 
         if (noreplace_paravirt)
                 return;
@@ -358,13 +365,15 @@ void apply_paravirt(struct paravirt_patch_site *start,
         for (p = start; p < end; p++) {
                 unsigned int used;
 
-                used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
-                                          p->len);
+                BUG_ON(p->len > MAX_PATCH_LEN);
+                used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
+                                          (unsigned long)p->instr, p->len);
 
                 BUG_ON(used > p->len);
 
                 /* Pad the rest with nops */
-                nop_out(p->instr + used, p->len - used);
+                add_nops(insnbuf + used, p->len - used);
+                text_poke(p->instr, insnbuf, p->len);
         }
 }
 extern struct paravirt_patch_site __start_parainstructions[],
@@ -379,7 +388,7 @@ void __init alternative_instructions(void)
            that might execute the to be patched code.
            Other CPUs are not running. */
         stop_nmi();
-#ifdef CONFIG_MCE
+#ifdef CONFIG_X86_MCE
         stop_mce();
 #endif
 
@@ -417,7 +426,7 @@ void __init alternative_instructions(void)
 
         local_irq_restore(flags);
         restart_nmi();
-#ifdef CONFIG_MCE
+#ifdef CONFIG_X86_MCE
         restart_mce();
 #endif
 }
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index bfc6cb7df7e..3d67ae18d76 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -61,8 +61,9 @@ static int enable_local_apic __initdata = 0;
 /* Local APIC timer verification ok */
 static int local_apic_timer_verify_ok;
 
-/* Disable local APIC timer from the kernel commandline or via dmi quirk */
-static int local_apic_timer_disabled;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk
+   or using CPU MSR check */
+int local_apic_timer_disabled;
 /* Local APIC timer works in C2 */
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
@@ -370,12 +371,9 @@ void __init setup_boot_APIC_clock(void)
         long delta, deltapm;
         int pm_referenced = 0;
 
-        if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
-                local_apic_timer_disabled = 1;
-
         /*
          * The local apic timer can be disabled via the kernel
-         * commandline or from the test above. Register the lapic
+         * commandline or from the CPU detection code. Register the lapic
          * timer as a dummy clock event source on SMP systems, so the
          * broadcast mechanism is used. On UP systems simply ignore it.
          */
@@ -1087,7 +1085,7 @@ static int __init detect_init_APIC (void)
         if (l & MSR_IA32_APICBASE_ENABLE)
                 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
 
-        if (nmi_watchdog != NMI_NONE)
+        if (nmi_watchdog != NMI_NONE && nmi_watchdog != NMI_DISABLED)
                 nmi_watchdog = NMI_LOCAL_APIC;
 
         printk(KERN_INFO "Found and enabled local APIC!\n");
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index c7ba455d5ac..dcf6bbb1c7c 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -3,6 +3,7 @@
 #include <linux/mm.h>
 #include <asm/io.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 
 #include "cpu.h"
 
@@ -22,6 +23,7 @@ extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
+#ifdef CONFIG_X86_LOCAL_APIC
 #define ENABLE_C1E_MASK         0x18000000
 #define CPUID_PROCESSOR_SIGNATURE       1
 #define CPUID_XFAM              0x0ff00000
@@ -52,6 +54,7 @@ static __cpuinit int amd_apic_timer_broken(void)
         }
         return 0;
 }
+#endif
 
 int force_mwait __cpuinitdata;
 
@@ -282,8 +285,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                         num_cache_leaves = 3;
         }
 
+#ifdef CONFIG_X86_LOCAL_APIC
         if (amd_apic_timer_broken())
-                set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
+                local_apic_timer_disabled = 1;
+#endif
 
         if (c->x86 == 0x10 && !force_mwait)
                 clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 32d04b083e3..705e13a3078 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,8 @@ struct acpi_cpufreq_data {
 };
 
 static struct acpi_cpufreq_data *drv_data[NR_CPUS];
-static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+/* acpi_perf_data is a pointer to percpu data. */
+static struct acpi_processor_performance *acpi_perf_data;
 
 static struct cpufreq_driver acpi_cpufreq_driver;
 
@@ -508,24 +509,14 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
  * do _PDC and _PSD and find out the processor dependency for the
  * actual init that will happen later...
  */
-static int acpi_cpufreq_early_init(void)
+static int __init acpi_cpufreq_early_init(void)
 {
-        struct acpi_processor_performance *data;
-        unsigned int i, j;
-
         dprintk("acpi_cpufreq_early_init\n");
 
-        for_each_possible_cpu(i) {
-                data = kzalloc(sizeof(struct acpi_processor_performance),
-                               GFP_KERNEL);
-                if (!data) {
-                        for_each_possible_cpu(j) {
-                                kfree(acpi_perf_data[j]);
-                                acpi_perf_data[j] = NULL;
-                        }
-                        return -ENOMEM;
-                }
-                acpi_perf_data[i] = data;
+        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+        if (!acpi_perf_data) {
+                dprintk("Memory allocation error for acpi_perf_data.\n");
+                return -ENOMEM;
         }
 
         /* Do initialization in ACPI core */
@@ -574,14 +565,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
         dprintk("acpi_cpufreq_cpu_init\n");
 
-        if (!acpi_perf_data[cpu])
-                return -ENODEV;
-
         data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
         if (!data)
                 return -ENOMEM;
 
-        data->acpi_data = acpi_perf_data[cpu];
+        data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
         drv_data[cpu] = data;
 
         if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
@@ -778,24 +766,25 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 
 static int __init acpi_cpufreq_init(void)
 {
+        int ret;
+
         dprintk("acpi_cpufreq_init\n");
 
-        acpi_cpufreq_early_init();
+        ret = acpi_cpufreq_early_init();
+        if (ret)
+                return ret;
 
         return cpufreq_register_driver(&acpi_cpufreq_driver);
 }
 
 static void __exit acpi_cpufreq_exit(void)
 {
-        unsigned int i;
-
         dprintk("acpi_cpufreq_exit\n");
 
         cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
-        for_each_possible_cpu(i) {
-                kfree(acpi_perf_data[i]);
-                acpi_perf_data[i] = NULL;
-        }
+        free_percpu(acpi_perf_data);
+
         return;
 }
diff --git a/arch/i386/kernel/doublefault.c b/arch/i386/kernel/doublefault.c
index 265c5597efb..40978af630e 100644
--- a/arch/i386/kernel/doublefault.c
+++ b/arch/i386/kernel/doublefault.c
@@ -13,7 +13,7 @@ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
 
 #define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
 
-#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + 0x1000000)
+#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
 
 static void doublefault_fn(void)
 {
@@ -23,23 +23,23 @@ static void doublefault_fn(void)
         store_gdt(&gdt_desc);
         gdt = gdt_desc.address;
 
-        printk("double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+        printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
 
         if (ptr_ok(gdt)) {
                 gdt += GDT_ENTRY_TSS << 3;
                 tss = *(u16 *)(gdt+2);
                 tss += *(u8 *)(gdt+4) << 16;
                 tss += *(u8 *)(gdt+7) << 24;
-                printk("double fault, tss at %08lx\n", tss);
+                printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
 
                 if (ptr_ok(tss)) {
                         struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
-                        printk("eip = %08lx, esp = %08lx\n", t->eip, t->esp);
+                        printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp);
 
-                        printk("eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
+                        printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
                                 t->eax, t->ebx, t->ecx, t->edx);
 
-                        printk("esi = %08lx, edi = %08lx\n",
+                        printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
                                 t->esi, t->edi);
                 }
         }
@@ -63,6 +63,7 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
                 .cs             = __KERNEL_CS,
                 .ss             = __KERNEL_DS,
                 .ds             = __USER_DS,
+                .fs             = __KERNEL_PERCPU,
 
                 .__cr3          = __pa(swapper_pg_dir)
         }
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 7c52b222207..8f0382161c9 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -162,9 +162,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
  * which will be freed later
 */
 
-#ifdef CONFIG_HOTPLUG_CPU
-.section .text,"ax",@progbits
-#else
+#ifndef CONFIG_HOTPLUG_CPU
 .section .init.text,"ax",@progbits
 #endif
 
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 893df828075..4b8a8da4b2e 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1256,12 +1256,15 @@ static struct irq_chip ioapic_chip;
 static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
         if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-                        trigger == IOAPIC_LEVEL)
+                        trigger == IOAPIC_LEVEL) {
+                irq_desc[irq].status |= IRQ_LEVEL;
                 set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                          handle_fasteoi_irq, "fasteoi");
-        else
+        } else {
+                irq_desc[irq].status &= ~IRQ_LEVEL;
                 set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                          handle_edge_irq, "edge");
+        }
         set_intr_gate(vector, interrupt[irq]);
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 99beac7f96c..8c1c965eb2a 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -77,7 +77,7 @@ static int __init check_nmi_watchdog(void)
         unsigned int *prev_nmi_count;
         int cpu;
 
-        if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
+        if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
                 return 0;
 
         if (!atomic_read(&nmi_active))
@@ -424,7 +424,7 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
         if (!!old_state == !!nmi_watchdog_enabled)
                 return 0;
 
-        if (atomic_read(&nmi_active) < 0) {
+        if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
                 printk( KERN_WARNING "NMI watchdog is permanently disabled\n");
                 return -EIO;
         }
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index ea962c0667d..739cfb207dd 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -69,7 +69,8 @@ DEF_NATIVE(read_tsc, "rdtsc");
 
 DEF_NATIVE(ud2a, "ud2a");
 
-static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+                             unsigned long addr, unsigned len)
 {
         const unsigned char *start, *end;
         unsigned ret;
@@ -90,7 +91,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
 #undef SITE
 
         patch_site:
-                ret = paravirt_patch_insns(insns, len, start, end);
+                ret = paravirt_patch_insns(ibuf, len, start, end);
                 break;
 
         case PARAVIRT_PATCH(make_pgd):
@@ -107,7 +108,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
                 break;
 
         default:
-                ret = paravirt_patch_default(type, clobbers, insns, len);
+                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                 break;
         }
 
@@ -129,68 +130,67 @@ struct branch {
         u32 delta;
 } __attribute__((packed));
 
-unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
-                             void *site, u16 site_clobbers,
+unsigned paravirt_patch_call(void *insnbuf,
+                             const void *target, u16 tgt_clobbers,
+                             unsigned long addr, u16 site_clobbers,
                              unsigned len)
 {
-        unsigned char *call = site;
-        unsigned long delta = (unsigned long)target - (unsigned long)(call+5);
-        struct branch b;
+        struct branch *b = insnbuf;
+        unsigned long delta = (unsigned long)target - (addr+5);
 
         if (tgt_clobbers & ~site_clobbers)
                 return len;     /* target would clobber too much for this site */
         if (len < 5)
                 return len;     /* call too long for patch site */
 
-        b.opcode = 0xe8; /* call */
-        b.delta = delta;
-        BUILD_BUG_ON(sizeof(b) != 5);
-        text_poke(call, (unsigned char *)&b, 5);
+        b->opcode = 0xe8; /* call */
+        b->delta = delta;
+        BUILD_BUG_ON(sizeof(*b) != 5);
 
         return 5;
 }
 
-unsigned paravirt_patch_jmp(void *target, void *site, unsigned len)
+unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
+                            unsigned long addr, unsigned len)
 {
-        unsigned char *jmp = site;
-        unsigned long delta = (unsigned long)target - (unsigned long)(jmp+5);
-        struct branch b;
+        struct branch *b = insnbuf;
+        unsigned long delta = (unsigned long)target - (addr+5);
 
         if (len < 5)
                 return len;     /* call too long for patch site */
 
-        b.opcode = 0xe9;        /* jmp */
-        b.delta = delta;
-        text_poke(jmp, (unsigned char *)&b, 5);
+        b->opcode = 0xe9;       /* jmp */
+        b->delta = delta;
 
         return 5;
}
 
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len)
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+                                unsigned long addr, unsigned len)
 {
         void *opfunc = *((void **)&paravirt_ops + type);
         unsigned ret;
 
         if (opfunc == NULL)
                 /* If there's no function, patch it with a ud2a (BUG) */
-                ret = paravirt_patch_insns(site, len, start_ud2a, end_ud2a);
+                ret = paravirt_patch_insns(insnbuf, len, start_ud2a, end_ud2a);
         else if (opfunc == paravirt_nop)
                 /* If the operation is a nop, then nop the callsite */
                 ret = paravirt_patch_nop();
         else if (type == PARAVIRT_PATCH(iret) ||
                  type == PARAVIRT_PATCH(irq_enable_sysexit))
                 /* If operation requires a jmp, then jmp */
-                ret = paravirt_patch_jmp(opfunc, site, len);
+                ret = paravirt_patch_jmp(opfunc, insnbuf, addr, len);
         else
                 /* Otherwise call the function; assume target could
                    clobber any caller-save reg */
-                ret = paravirt_patch_call(opfunc, CLBR_ANY,
-                                          site, clobbers, len);
+                ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
+                                          addr, clobbers, len);
 
         return ret;
 }
 
-unsigned paravirt_patch_insns(void *site, unsigned len,
+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                               const char *start, const char *end)
 {
         unsigned insn_len = end - start;
@@ -198,7 +198,7 @@ unsigned paravirt_patch_insns(void *site, unsigned len,
         if (insn_len > len || start == NULL)
                 insn_len = len;
         else
-                memcpy(site, start, insn_len);
+                memcpy(insnbuf, start, insn_len);
 
         return insn_len;
 }
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 72042bb7ec9..18673e0f193 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -87,12 +87,14 @@ struct vmi_timer_ops vmi_timer_ops;
 #define IRQ_PATCH_INT_MASK 0
 #define IRQ_PATCH_DISABLE  5
 
-static inline void patch_offset(unsigned char *eip, unsigned char *dest)
+static inline void patch_offset(void *insnbuf,
+                                unsigned long eip, unsigned long dest)
 {
-        *(unsigned long *)(eip+1) = dest-eip-5;
+        *(unsigned long *)(insnbuf+1) = dest-eip-5;
 }
 
-static unsigned patch_internal(int call, unsigned len, void *insns)
+static unsigned patch_internal(int call, unsigned len, void *insnbuf,
+                               unsigned long eip)
 {
         u64 reloc;
         struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
@@ -100,14 +102,14 @@ static unsigned patch_internal(int call, unsigned len, void *insns)
         switch(rel->type) {
                 case VMI_RELOCATION_CALL_REL:
                         BUG_ON(len < 5);
-                        *(char *)insns = MNEM_CALL;
-                        patch_offset(insns, rel->eip);
+                        *(char *)insnbuf = MNEM_CALL;
+                        patch_offset(insnbuf, eip, (unsigned long)rel->eip);
                         return 5;
 
                 case VMI_RELOCATION_JUMP_REL:
                         BUG_ON(len < 5);
-                        *(char *)insns = MNEM_JMP;
-                        patch_offset(insns, rel->eip);
+                        *(char *)insnbuf = MNEM_JMP;
+                        patch_offset(insnbuf, eip, (unsigned long)rel->eip);
                         return 5;
 
                 case VMI_RELOCATION_NOP:
@@ -128,21 +130,26 @@ static unsigned patch_internal(int call, unsigned len, void *insns)
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
-static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
+                          unsigned long eip, unsigned len)
 {
         switch (type) {
                 case PARAVIRT_PATCH(irq_disable):
-                        return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
+                        return patch_internal(VMI_CALL_DisableInterrupts, len,
+                                              insns, eip);
                 case PARAVIRT_PATCH(irq_enable):
-                        return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
+                        return patch_internal(VMI_CALL_EnableInterrupts, len,
+                                              insns, eip);
                 case PARAVIRT_PATCH(restore_fl):
-                        return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
+                        return patch_internal(VMI_CALL_SetInterruptMask, len,
+                                              insns, eip);
                 case PARAVIRT_PATCH(save_fl):
-                        return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
+                        return patch_internal(VMI_CALL_GetInterruptMask, len,
+                                              insns, eip);
                 case PARAVIRT_PATCH(iret):
-                        return patch_internal(VMI_CALL_IRET, len, insns);
+                        return patch_internal(VMI_CALL_IRET, len, insns, eip);
                 case PARAVIRT_PATCH(irq_enable_sysexit):
-                        return patch_internal(VMI_CALL_SYSEXIT, len, insns);
+                        return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
                 default:
                         break;
         }
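
Note: the common thread in the alternative.c, paravirt.c and vmi.c hunks above is a single patching discipline. Each patcher now assembles the final instruction sequence in a scratch buffer (memcpy plus add_nops) and writes the live kernel text exactly once via text_poke(), rather than poking the site piecemeal; the site address travels separately (addr/eip) so relative call/jmp displacements, delta = target - (addr+5), are computed against the eventual location while the bytes still sit in the buffer. A minimal userspace sketch of that buffer-then-poke flow follows; add_nops_demo, text_poke_demo and the byte values are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <string.h>

#define MAX_PATCH_LEN (255-1)

/*
 * Stand-in for the kernel's add_nops(): pad a buffer tail with
 * single-byte nops (0x90).  The real helper picks optimal multi-byte
 * nop sequences from a per-CPU nop table.
 */
static void add_nops_demo(unsigned char *insns, size_t len)
{
        memset(insns, 0x90, len);
}

/* Stand-in for text_poke(): a single copy into the "live" instruction bytes. */
static void text_poke_demo(unsigned char *addr, const unsigned char *buf,
                           size_t len)
{
        memcpy(addr, buf, len);
}

int main(void)
{
        /* Pretend these eight bytes are a live patch site. */
        unsigned char site[8] = { 0x90, 0x90, 0x90, 0x90,
                                  0x90, 0x90, 0x90, 0x90 };
        /* A 3-byte replacement sequence; the values are arbitrary here. */
        const unsigned char replacement[3] = { 0x0f, 0x01, 0xf8 };
        unsigned char insnbuf[MAX_PATCH_LEN];

        /* Assemble the replacement plus nop padding off to the side... */
        memcpy(insnbuf, replacement, sizeof(replacement));
        add_nops_demo(insnbuf + sizeof(replacement),
                      sizeof(site) - sizeof(replacement));

        /* ...then write the patch site exactly once. */
        text_poke_demo(site, insnbuf, sizeof(site));

        for (size_t i = 0; i < sizeof(site); i++)
                printf("%02x ", site[i]);
        printf("\n");
        return 0;
}

Running it prints the replacement bytes followed by nop padding, written to the site in one step, which is why the old two-write helpers (memcpy then nop_out on live text) could be dropped.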