Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/Makefile                        1
-rw-r--r--  arch/i386/kernel/acpi/Makefile                   2
-rw-r--r--  arch/i386/kernel/alternative.c                  51
-rw-r--r--  arch/i386/kernel/apic.c                         10
-rw-r--r--  arch/i386/kernel/apm.c                           2
-rw-r--r--  arch/i386/kernel/cpu/amd.c                       7
-rw-r--r--  arch/i386/kernel/cpu/bugs.c                      1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig             2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c     43
-rw-r--r--  arch/i386/kernel/doublefault.c                  13
-rw-r--r--  arch/i386/kernel/e820.c                          2
-rw-r--r--  arch/i386/kernel/head.S                          4
-rw-r--r--  arch/i386/kernel/io_apic.c                       7
-rw-r--r--  arch/i386/kernel/legacy_serial.c                67
-rw-r--r--  arch/i386/kernel/microcode.c                     1
-rw-r--r--  arch/i386/kernel/paravirt.c                     52
-rw-r--r--  arch/i386/kernel/setup.c                         2
-rw-r--r--  arch/i386/kernel/sys_i386.c                      1
-rw-r--r--  arch/i386/kernel/sysenter.c                      1
-rw-r--r--  arch/i386/kernel/vmi.c                          35
20 files changed, 119 insertions(+), 185 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index dbe5e87e0d6..9d33b00de65 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -35,7 +35,6 @@ obj-y += sysenter.o vsyscall.o
obj-$(CONFIG_ACPI_SRAT) += srat.o
obj-$(CONFIG_EFI) += efi.o efi_stub.o
obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
-obj-$(CONFIG_SERIAL_8250) += legacy_serial.o
obj-$(CONFIG_VM86) += vm86.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_HPET_TIMER) += hpet.o
diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile
index 223f58fc9f4..7f7be01f44e 100644
--- a/arch/i386/kernel/acpi/Makefile
+++ b/arch/i386/kernel/acpi/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_ACPI) += boot.o
ifneq ($(CONFIG_PCI),)
obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
endif
-obj-$(CONFIG_ACPI) += sleep.o wakeup.o
+obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o processor.o
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index c3750c2c411..1b66d5c70ea 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -11,6 +11,8 @@
#include <asm/mce.h>
#include <asm/nmi.h>
+#define MAX_PATCH_LEN (255-1)
+
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;
@@ -148,7 +150,8 @@ static unsigned char** find_nop_table(void)
#endif /* CONFIG_X86_64 */
-static void nop_out(void *insns, unsigned int len)
+/* Use this to add nops to a buffer, then text_poke the whole buffer. */
+static void add_nops(void *insns, unsigned int len)
{
unsigned char **noptable = find_nop_table();
@@ -156,7 +159,7 @@ static void nop_out(void *insns, unsigned int len)
unsigned int noplen = len;
if (noplen > ASM_NOP_MAX)
noplen = ASM_NOP_MAX;
- text_poke(insns, noptable[noplen], noplen);
+ memcpy(insns, noptable[noplen], noplen);
insns += noplen;
len -= noplen;
}
@@ -174,15 +177,15 @@ extern u8 *__smp_locks[], *__smp_locks_end[];
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
struct alt_instr *a;
- u8 *instr;
- int diff;
+ char insnbuf[MAX_PATCH_LEN];
DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
for (a = start; a < end; a++) {
+ u8 *instr = a->instr;
BUG_ON(a->replacementlen > a->instrlen);
+ BUG_ON(a->instrlen > sizeof(insnbuf));
if (!boot_cpu_has(a->cpuid))
continue;
- instr = a->instr;
#ifdef CONFIG_X86_64
/* vsyscall code is not mapped yet. resolve it manually. */
if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
@@ -191,9 +194,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
__FUNCTION__, a->instr, instr);
}
#endif
- memcpy(instr, a->replacement, a->replacementlen);
- diff = a->instrlen - a->replacementlen;
- nop_out(instr + a->replacementlen, diff);
+ memcpy(insnbuf, a->replacement, a->replacementlen);
+ add_nops(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ text_poke(instr, insnbuf, a->instrlen);
}
}
@@ -215,16 +219,18 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
u8 **ptr;
+ char insn[1];
if (noreplace_smp)
return;
+ add_nops(insn, 1);
for (ptr = start; ptr < end; ptr++) {
if (*ptr < text)
continue;
if (*ptr > text_end)
continue;
- nop_out(*ptr, 1);
+ text_poke(*ptr, insn, 1);
};
}
@@ -351,6 +357,7 @@ void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end)
{
struct paravirt_patch_site *p;
+ char insnbuf[MAX_PATCH_LEN];
if (noreplace_paravirt)
return;
@@ -358,13 +365,15 @@ void apply_paravirt(struct paravirt_patch_site *start,
for (p = start; p < end; p++) {
unsigned int used;
- used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
- p->len);
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
BUG_ON(used > p->len);
/* Pad the rest with nops */
- nop_out(p->instr + used, p->len - used);
+ add_nops(insnbuf + used, p->len - used);
+ text_poke(p->instr, insnbuf, p->len);
}
}
extern struct paravirt_patch_site __start_parainstructions[],
@@ -379,7 +388,7 @@ void __init alternative_instructions(void)
that might execute the to be patched code.
Other CPUs are not running. */
stop_nmi();
-#ifdef CONFIG_MCE
+#ifdef CONFIG_X86_MCE
stop_mce();
#endif
@@ -417,7 +426,7 @@ void __init alternative_instructions(void)
local_irq_restore(flags);
restart_nmi();
-#ifdef CONFIG_MCE
+#ifdef CONFIG_X86_MCE
restart_mce();
#endif
}
@@ -430,22 +439,12 @@ void __init alternative_instructions(void)
* And on the local CPU you need to be protected again NMI or MCE handlers
* seeing an inconsistent instruction while you patch.
*/
-void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
+void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
- u8 *addr = oaddr;
- if (!pte_write(*lookup_address((unsigned long)addr))) {
- struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
- addr = vmap(p, 2, VM_MAP, PAGE_KERNEL);
- if (!addr)
- return;
- addr += ((unsigned long)oaddr) % PAGE_SIZE;
- }
memcpy(addr, opcode, len);
sync_core();
/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
case. */
if (cpu_has_clflush)
- asm("clflush (%0) " :: "r" (oaddr) : "memory");
- if (addr != oaddr)
- vunmap(addr);
+ asm("clflush (%0) " :: "r" (addr) : "memory");
}
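
[Editor's note] The alternative.c changes above replace in-place NOP patching (nop_out) with assembling the complete replacement in a local insnbuf via add_nops() and writing it with a single text_poke(). A minimal standalone sketch (not kernel code) of that build-in-a-buffer-then-copy pattern; a single-byte 0x90 NOP stands in for the kernel's optimized NOP tables:

/* Standalone sketch: build the replacement bytes in a scratch buffer, pad
 * the tail with NOPs, then apply the whole thing with one copy, mirroring
 * the apply_alternatives() flow above. */
#include <stdio.h>
#include <string.h>

#define MAX_PATCH_LEN (255 - 1)

static void add_nops(unsigned char *buf, unsigned int len)
{
	memset(buf, 0x90, len);			/* 0x90 = one-byte NOP */
}

static void patch_site(unsigned char *site, unsigned int sitelen,
		       const unsigned char *repl, unsigned int repllen)
{
	unsigned char insnbuf[MAX_PATCH_LEN];

	if (repllen > sitelen || sitelen > sizeof(insnbuf))
		return;				/* does not fit; leave the site alone */

	memcpy(insnbuf, repl, repllen);		/* replacement first... */
	add_nops(insnbuf + repllen, sitelen - repllen);	/* ...then NOP padding */
	memcpy(site, insnbuf, sitelen);		/* one write, like text_poke() */
}

int main(void)
{
	unsigned char site[6] = { 0xf0, 0x0f, 0xc1, 0x02, 0x89, 0xc3 };
	const unsigned char repl[] = { 0x0f, 0xc1, 0x02 };	/* shorter replacement */
	unsigned int i;

	patch_site(site, sizeof(site), repl, sizeof(repl));
	for (i = 0; i < sizeof(site); i++)
		printf("%02x ", site[i]);	/* 0f c1 02 90 90 90 */
	printf("\n");
	return 0;
}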
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index bfc6cb7df7e..f9fff29e01a 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -61,8 +61,9 @@ static int enable_local_apic __initdata = 0;
/* Local APIC timer verification ok */
static int local_apic_timer_verify_ok;
-/* Disable local APIC timer from the kernel commandline or via dmi quirk */
-static int local_apic_timer_disabled;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk
+ or using CPU MSR check */
+int local_apic_timer_disabled;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
@@ -370,12 +371,9 @@ void __init setup_boot_APIC_clock(void)
long delta, deltapm;
int pm_referenced = 0;
- if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
- local_apic_timer_disabled = 1;
-
/*
* The local apic timer can be disabled via the kernel
- * commandline or from the test above. Register the lapic
+ * commandline or from the CPU detection code. Register the lapic
* timer as a dummy clock event source on SMP systems, so the
* broadcast mechanism is used. On UP systems simply ignore it.
*/
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 47001d50a08..f02a8aca826 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -2235,7 +2235,7 @@ static int __init apm_init(void)
apm_info.bios.cseg_16_len = 0; /* 64k */
if (debug) {
- printk(KERN_INFO "apm: entry %x:%lx cseg16 %x dseg %x",
+ printk(KERN_INFO "apm: entry %x:%x cseg16 %x dseg %x",
apm_info.bios.cseg, apm_info.bios.offset,
apm_info.bios.cseg_16, apm_info.bios.dseg);
if (apm_info.bios.version > 0x100)
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index c7ba455d5ac..dcf6bbb1c7c 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -3,6 +3,7 @@
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/processor.h>
+#include <asm/apic.h>
#include "cpu.h"
@@ -22,6 +23,7 @@
extern void vide(void);
__asm__(".align 4\nvide: ret");
+#ifdef CONFIG_X86_LOCAL_APIC
#define ENABLE_C1E_MASK 0x18000000
#define CPUID_PROCESSOR_SIGNATURE 1
#define CPUID_XFAM 0x0ff00000
@@ -52,6 +54,7 @@ static __cpuinit int amd_apic_timer_broken(void)
}
return 0;
}
+#endif
int force_mwait __cpuinitdata;
@@ -282,8 +285,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
num_cache_leaves = 3;
}
+#ifdef CONFIG_X86_LOCAL_APIC
if (amd_apic_timer_broken())
- set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
+ local_apic_timer_disabled = 1;
+#endif
if (c->x86 == 0x10 && !force_mwait)
clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
diff --git a/arch/i386/kernel/cpu/bugs.c b/arch/i386/kernel/cpu/bugs.c
index 54428a2500f..59266f03d1c 100644
--- a/arch/i386/kernel/cpu/bugs.c
+++ b/arch/i386/kernel/cpu/bugs.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
#include <linux/utsname.h>
+#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 094118ba00d..d8c6f132dc7 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -92,7 +92,7 @@ config X86_POWERNOW_K8
config X86_POWERNOW_K8_ACPI
bool "ACPI Support"
select ACPI_PROCESSOR
- depends on X86_POWERNOW_K8
+ depends on ACPI && X86_POWERNOW_K8
default y
help
This provides access to the K8s Processor Performance States via ACPI.
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 6f846bee210..705e13a3078 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,8 @@ struct acpi_cpufreq_data {
};
static struct acpi_cpufreq_data *drv_data[NR_CPUS];
-static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+/* acpi_perf_data is a pointer to percpu data. */
+static struct acpi_processor_performance *acpi_perf_data;
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -508,26 +509,14 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
* do _PDC and _PSD and find out the processor dependency for the
* actual init that will happen later...
*/
-static int acpi_cpufreq_early_init(void)
+static int __init acpi_cpufreq_early_init(void)
{
- struct acpi_processor_performance *data;
- cpumask_t covered;
- unsigned int i, j;
-
dprintk("acpi_cpufreq_early_init\n");
- for_each_possible_cpu(i) {
- data = kzalloc(sizeof(struct acpi_processor_performance),
- GFP_KERNEL);
- if (!data) {
- for_each_cpu_mask(j, covered) {
- kfree(acpi_perf_data[j]);
- acpi_perf_data[j] = NULL;
- }
- return -ENOMEM;
- }
- acpi_perf_data[i] = data;
- cpu_set(i, covered);
+ acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+ if (!acpi_perf_data) {
+ dprintk("Memory allocation error for acpi_perf_data.\n");
+ return -ENOMEM;
}
/* Do initialization in ACPI core */
@@ -576,14 +565,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
dprintk("acpi_cpufreq_cpu_init\n");
- if (!acpi_perf_data[cpu])
- return -ENODEV;
-
data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->acpi_data = acpi_perf_data[cpu];
+ data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
drv_data[cpu] = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
@@ -780,24 +766,25 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
static int __init acpi_cpufreq_init(void)
{
+ int ret;
+
dprintk("acpi_cpufreq_init\n");
- acpi_cpufreq_early_init();
+ ret = acpi_cpufreq_early_init();
+ if (ret)
+ return ret;
return cpufreq_register_driver(&acpi_cpufreq_driver);
}
static void __exit acpi_cpufreq_exit(void)
{
- unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
- for_each_possible_cpu(i) {
- kfree(acpi_perf_data[i]);
- acpi_perf_data[i] = NULL;
- }
+ free_percpu(acpi_perf_data);
+
return;
}
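
[Editor's note] The acpi-cpufreq.c change above swaps a hand-maintained NR_CPUS array of individually allocated acpi_processor_performance structures for the kernel's percpu allocator (alloc_percpu()/percpu_ptr()/free_percpu()), removing the partial-failure cleanup loop. A user-space sketch of the same shape; the helper names are illustrative stand-ins, not the kernel percpu API:

/* User-space sketch: one up-front allocation with a single failure path
 * replaces a loop of per-CPU allocations and its unwind code. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct perf_data {
	unsigned int state_count;
};

static struct perf_data *perf_data;		/* one slot per possible CPU */

static int early_init(void)
{
	perf_data = calloc(NR_CPUS, sizeof(*perf_data));
	if (!perf_data)
		return -1;			/* nothing to unwind */
	return 0;
}

static struct perf_data *per_cpu_slot(unsigned int cpu)
{
	return &perf_data[cpu];			/* stands in for percpu_ptr(p, cpu) */
}

static void late_exit(void)
{
	free(perf_data);			/* stands in for free_percpu(p) */
	perf_data = NULL;
}

int main(void)
{
	if (early_init())
		return 1;
	per_cpu_slot(2)->state_count = 3;
	printf("cpu2 states: %u\n", per_cpu_slot(2)->state_count);
	late_exit();
	return 0;
}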
diff --git a/arch/i386/kernel/doublefault.c b/arch/i386/kernel/doublefault.c
index 265c5597efb..40978af630e 100644
--- a/arch/i386/kernel/doublefault.c
+++ b/arch/i386/kernel/doublefault.c
@@ -13,7 +13,7 @@
static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
-#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + 0x1000000)
+#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
static void doublefault_fn(void)
{
@@ -23,23 +23,23 @@ static void doublefault_fn(void)
store_gdt(&gdt_desc);
gdt = gdt_desc.address;
- printk("double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
if (ptr_ok(gdt)) {
gdt += GDT_ENTRY_TSS << 3;
tss = *(u16 *)(gdt+2);
tss += *(u8 *)(gdt+4) << 16;
tss += *(u8 *)(gdt+7) << 24;
- printk("double fault, tss at %08lx\n", tss);
+ printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
if (ptr_ok(tss)) {
struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
- printk("eip = %08lx, esp = %08lx\n", t->eip, t->esp);
+ printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp);
- printk("eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
+ printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
t->eax, t->ebx, t->ecx, t->edx);
- printk("esi = %08lx, edi = %08lx\n",
+ printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
t->esi, t->edi);
}
}
@@ -63,6 +63,7 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
.cs = __KERNEL_CS,
.ss = __KERNEL_DS,
.ds = __USER_DS,
+ .fs = __KERNEL_PERCPU,
.__cr3 = __pa(swapper_pg_dir)
}
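
[Editor's note] The doublefault.c hunk above decodes the TSS base by reassembling the scattered base bytes of its GDT descriptor (bits 15:0 at offset 2, 23:16 at offset 4, 31:24 at offset 7). A standalone sketch of that reassembly, using a made-up descriptor:

/* Standalone sketch: the 32-bit base of a TSS descriptor is spread over
 * bytes 2-4 and 7 of the 8-byte GDT entry and is put back together exactly
 * as doublefault_fn() does it. Descriptor bytes are illustrative
 * (base 0xc1234567). */
#include <stdio.h>

int main(void)
{
	unsigned char gdt[8] = {
		0x67, 0x00,		/* limit 15:0 */
		0x67, 0x45, 0x23,	/* base 23:0 (little-endian) */
		0x89,			/* type: available 32-bit TSS */
		0x00,			/* flags / limit 19:16 */
		0xc1			/* base 31:24 */
	};
	unsigned long tss;

	tss  = *(unsigned short *)(gdt + 2);		/* base 15:0  */
	tss += (unsigned long)gdt[4] << 16;		/* base 23:16 */
	tss += (unsigned long)gdt[7] << 24;		/* base 31:24 */

	printf("tss base = %08lx\n", tss);		/* c1234567 */
	return 0;
}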
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
index e60cddbc4cf..3c86b979a40 100644
--- a/arch/i386/kernel/e820.c
+++ b/arch/i386/kernel/e820.c
@@ -321,7 +321,7 @@ static int __init request_standard_resources(void)
subsys_initcall(request_standard_resources);
-#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
+#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
/**
* e820_mark_nosave_regions - Find the ranges of physical addresses that do not
* correspond to e820 RAM areas and mark the corresponding pages as nosave for
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 7c52b222207..8f0382161c9 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -162,9 +162,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
* which will be freed later
*/
-#ifdef CONFIG_HOTPLUG_CPU
-.section .text,"ax",@progbits
-#else
+#ifndef CONFIG_HOTPLUG_CPU
.section .init.text,"ax",@progbits
#endif
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 893df828075..4b8a8da4b2e 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1256,12 +1256,15 @@ static struct irq_chip ioapic_chip;
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
+ trigger == IOAPIC_LEVEL) {
+ irq_desc[irq].status |= IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &ioapic_chip,
handle_fasteoi_irq, "fasteoi");
- else
+ } else {
+ irq_desc[irq].status &= ~IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &ioapic_chip,
handle_edge_irq, "edge");
+ }
set_intr_gate(vector, interrupt[irq]);
}
diff --git a/arch/i386/kernel/legacy_serial.c b/arch/i386/kernel/legacy_serial.c
deleted file mode 100644
index 21510118544..00000000000
--- a/arch/i386/kernel/legacy_serial.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Legacy COM port devices for x86 platforms without PNPBIOS or ACPI.
- * Data taken from include/asm-i386/serial.h.
- *
- * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pnp.h>
-#include <linux/serial_8250.h>
-
-/* Standard COM flags (except for COM4, because of the 8514 problem) */
-#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
-#define COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ)
-#else
-#define COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
-#define COM4_FLAGS UPF_BOOT_AUTOCONF
-#endif
-
-#define PORT(_base,_irq,_flags) \
- { \
- .iobase = _base, \
- .irq = _irq, \
- .uartclk = 1843200, \
- .iotype = UPIO_PORT, \
- .flags = _flags, \
- }
-
-static struct plat_serial8250_port x86_com_data[] = {
- PORT(0x3F8, 4, COM_FLAGS),
- PORT(0x2F8, 3, COM_FLAGS),
- PORT(0x3E8, 4, COM_FLAGS),
- PORT(0x2E8, 3, COM4_FLAGS),
- { },
-};
-
-static struct platform_device x86_com_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = x86_com_data,
- },
-};
-
-static int force_legacy_probe;
-module_param_named(force, force_legacy_probe, bool, 0);
-MODULE_PARM_DESC(force, "Force legacy serial port probe");
-
-static int __init serial8250_x86_com_init(void)
-{
- if (pnp_platform_devices && !force_legacy_probe)
- return -ENODEV;
-
- return platform_device_register(&x86_com_device);
-}
-
-module_init(serial8250_x86_com_init);
-
-MODULE_AUTHOR("Bjorn Helgaas");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic 8250/16x50 legacy probe module");
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index d865d041bea..09cf7811035 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -82,6 +82,7 @@
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/firmware.h>
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index ea962c0667d..739cfb207dd 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -69,7 +69,8 @@ DEF_NATIVE(read_tsc, "rdtsc");
DEF_NATIVE(ud2a, "ud2a");
-static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+ unsigned long addr, unsigned len)
{
const unsigned char *start, *end;
unsigned ret;
@@ -90,7 +91,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
#undef SITE
patch_site:
- ret = paravirt_patch_insns(insns, len, start, end);
+ ret = paravirt_patch_insns(ibuf, len, start, end);
break;
case PARAVIRT_PATCH(make_pgd):
@@ -107,7 +108,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
break;
default:
- ret = paravirt_patch_default(type, clobbers, insns, len);
+ ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
break;
}
@@ -129,68 +130,67 @@ struct branch {
u32 delta;
} __attribute__((packed));
-unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
- void *site, u16 site_clobbers,
+unsigned paravirt_patch_call(void *insnbuf,
+ const void *target, u16 tgt_clobbers,
+ unsigned long addr, u16 site_clobbers,
unsigned len)
{
- unsigned char *call = site;
- unsigned long delta = (unsigned long)target - (unsigned long)(call+5);
- struct branch b;
+ struct branch *b = insnbuf;
+ unsigned long delta = (unsigned long)target - (addr+5);
if (tgt_clobbers & ~site_clobbers)
return len; /* target would clobber too much for this site */
if (len < 5)
return len; /* call too long for patch site */
- b.opcode = 0xe8; /* call */
- b.delta = delta;
- BUILD_BUG_ON(sizeof(b) != 5);
- text_poke(call, (unsigned char *)&b, 5);
+ b->opcode = 0xe8; /* call */
+ b->delta = delta;
+ BUILD_BUG_ON(sizeof(*b) != 5);
return 5;
}
-unsigned paravirt_patch_jmp(void *target, void *site, unsigned len)
+unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
+ unsigned long addr, unsigned len)
{
- unsigned char *jmp = site;
- unsigned long delta = (unsigned long)target - (unsigned long)(jmp+5);
- struct branch b;
+ struct branch *b = insnbuf;
+ unsigned long delta = (unsigned long)target - (addr+5);
if (len < 5)
return len; /* call too long for patch site */
- b.opcode = 0xe9; /* jmp */
- b.delta = delta;
- text_poke(jmp, (unsigned char *)&b, 5);
+ b->opcode = 0xe9; /* jmp */
+ b->delta = delta;
return 5;
}
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len)
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ unsigned long addr, unsigned len)
{
void *opfunc = *((void **)&paravirt_ops + type);
unsigned ret;
if (opfunc == NULL)
/* If there's no function, patch it with a ud2a (BUG) */
- ret = paravirt_patch_insns(site, len, start_ud2a, end_ud2a);
+ ret = paravirt_patch_insns(insnbuf, len, start_ud2a, end_ud2a);
else if (opfunc == paravirt_nop)
/* If the operation is a nop, then nop the callsite */
ret = paravirt_patch_nop();
else if (type == PARAVIRT_PATCH(iret) ||
type == PARAVIRT_PATCH(irq_enable_sysexit))
/* If operation requires a jmp, then jmp */
- ret = paravirt_patch_jmp(opfunc, site, len);
+ ret = paravirt_patch_jmp(opfunc, insnbuf, addr, len);
else
/* Otherwise call the function; assume target could
clobber any caller-save reg */
- ret = paravirt_patch_call(opfunc, CLBR_ANY,
- site, clobbers, len);
+ ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
+ addr, clobbers, len);
return ret;
}
-unsigned paravirt_patch_insns(void *site, unsigned len,
+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
const char *start, const char *end)
{
unsigned insn_len = end - start;
@@ -198,7 +198,7 @@ unsigned paravirt_patch_insns(void *site, unsigned len,
if (insn_len > len || start == NULL)
insn_len = len;
else
- memcpy(site, start, insn_len);
+ memcpy(insnbuf, start, insn_len);
return insn_len;
}
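
[Editor's note] The paravirt.c changes above make the patchers emit into a caller-supplied insnbuf and compute branch displacements against the eventual patch-site address (addr) rather than the buffer, leaving the final text_poke() to apply_paravirt(). A standalone sketch of encoding the 5-byte CALL rel32 the same way, with hypothetical addresses:

/* Standalone sketch: a 5-byte x86 CALL rel32 is built in a scratch buffer,
 * with the displacement taken relative to where the bytes will eventually
 * live (addr), not relative to the buffer itself. */
#include <stdint.h>
#include <stdio.h>

struct branch {
	unsigned char opcode;
	uint32_t delta;
} __attribute__((packed));

static unsigned patch_call(void *insnbuf, unsigned long target,
			   unsigned long addr, unsigned len)
{
	struct branch *b = insnbuf;

	if (len < 5)
		return len;			/* call does not fit at this site */

	b->opcode = 0xe8;			/* CALL rel32 */
	b->delta = (uint32_t)(target - (addr + 5));	/* relative to next insn */
	return 5;
}

int main(void)
{
	unsigned char buf[5];
	unsigned long site = 0xc0100000UL;	/* hypothetical patch site */
	unsigned long func = 0xc0100100UL;	/* hypothetical call target */
	unsigned int i;

	patch_call(buf, func, site, sizeof(buf));
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);	/* e8 fb 00 00 00 */
	printf("\n");
	return 0;
}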
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 7fe5da3c932..d474cd639bc 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -422,7 +422,7 @@ void __init setup_bootmem_allocator(void)
*/
reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif
-#ifdef CONFIG_ACPI
+#ifdef CONFIG_ACPI_SLEEP
/*
* Reserve low memory region for sleep support.
*/
diff --git a/arch/i386/kernel/sys_i386.c b/arch/i386/kernel/sys_i386.c
index e5dcb937901..42147304de8 100644
--- a/arch/i386/kernel/sys_i386.c
+++ b/arch/i386/kernel/sys_i386.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 6deb159d08e..4eb2e408764 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <asm/cpufeature.h>
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 72042bb7ec9..18673e0f193 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -87,12 +87,14 @@ struct vmi_timer_ops vmi_timer_ops;
#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE 5
-static inline void patch_offset(unsigned char *eip, unsigned char *dest)
+static inline void patch_offset(void *insnbuf,
+ unsigned long eip, unsigned long dest)
{
- *(unsigned long *)(eip+1) = dest-eip-5;
+ *(unsigned long *)(insnbuf+1) = dest-eip-5;
}
-static unsigned patch_internal(int call, unsigned len, void *insns)
+static unsigned patch_internal(int call, unsigned len, void *insnbuf,
+ unsigned long eip)
{
u64 reloc;
struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
@@ -100,14 +102,14 @@ static unsigned patch_internal(int call, unsigned len, void *insns)
switch(rel->type) {
case VMI_RELOCATION_CALL_REL:
BUG_ON(len < 5);
- *(char *)insns = MNEM_CALL;
- patch_offset(insns, rel->eip);
+ *(char *)insnbuf = MNEM_CALL;
+ patch_offset(insnbuf, eip, (unsigned long)rel->eip);
return 5;
case VMI_RELOCATION_JUMP_REL:
BUG_ON(len < 5);
- *(char *)insns = MNEM_JMP;
- patch_offset(insns, rel->eip);
+ *(char *)insnbuf = MNEM_JMP;
+ patch_offset(insnbuf, eip, (unsigned long)rel->eip);
return 5;
case VMI_RELOCATION_NOP:
@@ -128,21 +130,26 @@ static unsigned patch_internal(int call, unsigned len, void *insns)
* Apply patch if appropriate, return length of new instruction
* sequence. The callee does nop padding for us.
*/
-static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
+ unsigned long eip, unsigned len)
{
switch (type) {
case PARAVIRT_PATCH(irq_disable):
- return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
+ return patch_internal(VMI_CALL_DisableInterrupts, len,
+ insns, eip);
case PARAVIRT_PATCH(irq_enable):
- return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
+ return patch_internal(VMI_CALL_EnableInterrupts, len,
+ insns, eip);
case PARAVIRT_PATCH(restore_fl):
- return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
+ return patch_internal(VMI_CALL_SetInterruptMask, len,
+ insns, eip);
case PARAVIRT_PATCH(save_fl):
- return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
+ return patch_internal(VMI_CALL_GetInterruptMask, len,
+ insns, eip);
case PARAVIRT_PATCH(iret):
- return patch_internal(VMI_CALL_IRET, len, insns);
+ return patch_internal(VMI_CALL_IRET, len, insns, eip);
case PARAVIRT_PATCH(irq_enable_sysexit):
- return patch_internal(VMI_CALL_SYSEXIT, len, insns);
+ return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
default:
break;
}