Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile                    |  4
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.lds.S  | 10
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  | 21
-rw-r--r--  arch/x86/kernel/cpu/common.c                | 27
-rw-r--r--  arch/x86/kernel/geode_32.c                  | 19
-rw-r--r--  arch/x86/kernel/i387.c                      | 12
-rw-r--r--  arch/x86/kernel/pci-dma.c                   |  8
-rw-r--r--  arch/x86/kernel/process.c                   | 36
-rw-r--r--  arch/x86/kernel/ptrace.c                    |  7
-rw-r--r--  arch/x86/kernel/setup.c                     |  6
-rw-r--r--  arch/x86/kernel/setup_32.c                  |  7
-rw-r--r--  arch/x86/kernel/setup_64.c                  | 15
-rw-r--r--  arch/x86/kernel/smp.c                       |  3
-rw-r--r--  arch/x86/kernel/smpboot.c                   | 24
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c            |  3
15 files changed, 133 insertions, 69 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bbdacb398d4..5e618c3b472 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -83,9 +83,7 @@ obj-$(CONFIG_KVM_GUEST)         += kvm.o
 obj-$(CONFIG_KVM_CLOCK)         += kvmclock.o
 obj-$(CONFIG_PARAVIRT)          += paravirt.o paravirt_patch_$(BITS).o
 
-ifdef CONFIG_INPUT_PCSPKR
-obj-y                           += pcspeaker.o
-endif
+obj-$(CONFIG_PCSPKR_PLATFORM)   += pcspeaker.o
 
 obj-$(CONFIG_SCx200)            += scx200.o
 scx200-y                        += scx200_32.o
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
index 22fab6c4be1..7da00b799cd 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
@@ -12,11 +12,6 @@ ENTRY(_start)
 
 SECTIONS
 {
-        . = HEADER_OFFSET;
-        .header : {
-                *(.header)
-        }
-
         . = 0;
         .text : {
                 *(.text*)
@@ -50,6 +45,11 @@ SECTIONS
                 __bss_end = .;
         }
 
+        . = HEADER_OFFSET;
+        .header : {
+                *(.header)
+        }
+
         . = ALIGN(16);
         _end = .;
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 238468ae199..c2e1ce33c7c 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -6,6 +6,7 @@
 
 #include <linux/cpu.h>
 
+#include <asm/pat.h>
 #include <asm/processor.h>
 
 struct cpuid_bit {
@@ -48,3 +49,23 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
                         set_cpu_cap(c, cb->feature);
         }
 }
+
+#ifdef CONFIG_X86_PAT
+void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
+{
+        switch (c->x86_vendor) {
+        case X86_VENDOR_AMD:
+                if (c->x86 >= 0xf && c->x86 <= 0x11)
+                        return;
+                break;
+        case X86_VENDOR_INTEL:
+                if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+                        return;
+                break;
+        }
+
+        pat_disable(cpu_has_pat ?
+                    "PAT disabled. Not yet verified on this CPU type." :
+                    "PAT not supported by CPU.");
+}
+#endif
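The validate_pat_support() hunk above centralizes what were three duplicated copies of the same vendor/family whitelist (the copies are removed from common.c and setup_64.c below). As a standalone illustration — a hypothetical user-space harness, not kernel code — the decision reduces to this predicate:

/* Hypothetical user-space harness, not kernel code: the PAT whitelist
 * above, reduced to a standalone predicate. */
#include <stdio.h>

enum vendor { VENDOR_AMD, VENDOR_INTEL, VENDOR_OTHER };

static int pat_known_good(enum vendor v, unsigned int family, unsigned int model)
{
        switch (v) {
        case VENDOR_AMD:        /* K8 through family 0x11 */
                return family >= 0xf && family <= 0x11;
        case VENDOR_INTEL:      /* Netburst, or Core: family 6, model >= 15 */
                return family == 0xf || (family == 6 && model >= 15);
        default:                /* everything else: PAT not vetted */
                return 0;
        }
}

int main(void)
{
        printf("%d\n", pat_known_good(VENDOR_INTEL, 6, 23));  /* 1: Core 2 */
        printf("%d\n", pat_known_good(VENDOR_AMD, 0x12, 0));  /* 0: unvetted */
        return 0;
}

Whitelisting the families where PAT has been verified and disabling it everywhere else fails safe on CPUs that were never tested.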
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 35b4f6a9c8e..d0463a94624 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -12,6 +12,7 @@
 #include <asm/mmu_context.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
+#include <asm/pat.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -308,19 +309,6 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 
         }
 
-        clear_cpu_cap(c, X86_FEATURE_PAT);
-
-        switch (c->x86_vendor) {
-        case X86_VENDOR_AMD:
-                if (c->x86 >= 0xf && c->x86 <= 0x11)
-                        set_cpu_cap(c, X86_FEATURE_PAT);
-                break;
-        case X86_VENDOR_INTEL:
-                if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-                        set_cpu_cap(c, X86_FEATURE_PAT);
-                break;
-        }
-
 }
 
 /*
@@ -409,18 +397,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
                 init_scattered_cpuid_features(c);
         }
 
-        clear_cpu_cap(c, X86_FEATURE_PAT);
-
-        switch (c->x86_vendor) {
-        case X86_VENDOR_AMD:
-                if (c->x86 >= 0xf && c->x86 <= 0x11)
-                        set_cpu_cap(c, X86_FEATURE_PAT);
-                break;
-        case X86_VENDOR_INTEL:
-                if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-                        set_cpu_cap(c, X86_FEATURE_PAT);
-                break;
-        }
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -651,6 +627,7 @@ void __init early_cpu_init(void)
                 cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
 
         early_cpu_detect();
+        validate_pat_support(&boot_cpu_data);
 }
 
 /* Make sure %fs is initialized properly in idle threads */
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
index 9dad6ca6cd7..e8edd63ab00 100644
--- a/arch/x86/kernel/geode_32.c
+++ b/arch/x86/kernel/geode_32.c
@@ -161,6 +161,25 @@ void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
 }
 EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
 
+int geode_has_vsa2(void)
+{
+        static int has_vsa2 = -1;
+
+        if (has_vsa2 == -1) {
+                /*
+                 * The VSA has virtual registers that we can query for a
+                 * signature.
+                 */
+                outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+                outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+                has_vsa2 = (inw(VSA_VRC_DATA) == VSA_SIG);
+        }
+
+        return has_vsa2;
+}
+EXPORT_SYMBOL_GPL(geode_has_vsa2);
+
 static int __init geode_southbridge_init(void)
 {
         if (!is_geode())
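geode_has_vsa2() caches its result in a static so the port sequence runs at most once. The probe itself is a three-step protocol against the VSA virtual-register pair: unlock, select the signature register, read back. A user-space sketch of the same sequence follows; the port and signature constants are quoted from this kernel's include/asm-x86/geode.h and are shown here for illustration only:

/* User-space sketch of the VSA2 probe; constants quoted from this
 * kernel's include/asm-x86/geode.h, illustrative only. */
#include <stdio.h>
#include <sys/io.h>

#define VSA_VRC_INDEX    0xAC1C  /* virtual register index port */
#define VSA_VRC_DATA     0xAC1E  /* virtual register data port */
#define VSA_VR_UNLOCK    0xFC53  /* unlock the virtual register set */
#define VSA_VR_SIGNATURE 0x0003  /* signature virtual register */
#define VSA_SIG          0x4132  /* ASCII 'A','2': VSA2 is present */

static int probe_vsa2(void)
{
        outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);     /* step 1: unlock */
        outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);  /* step 2: select the VR */
        return inw(VSA_VRC_DATA) == VSA_SIG;    /* step 3: read and compare */
}

int main(void)
{
        if (iopl(3)) {  /* ports above 0x3ff need iopl(), hence root */
                perror("iopl");
                return 1;
        }
        printf("VSA2 %s\n", probe_vsa2() ? "present" : "absent");
        return 0;
}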
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index db6839b5319..e03cc952f23 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -450,7 +450,6 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
         struct task_struct *tsk = current;
 
-        clear_fpu(tsk);
         return __copy_from_user(&tsk->thread.xstate->fsave, buf,
                                 sizeof(struct i387_fsave_struct));
 }
@@ -461,7 +460,6 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
         struct user_i387_ia32_struct env;
         int err;
 
-        clear_fpu(tsk);
         err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
                                sizeof(struct i387_fxsave_struct));
         /* mxcsr reserved bits must be masked to zero for security reasons */
@@ -478,6 +476,16 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
         int err;
 
         if (HAVE_HWFP) {
+                struct task_struct *tsk = current;
+
+                clear_fpu(tsk);
+
+                if (!used_math()) {
+                        err = init_fpu(tsk);
+                        if (err)
+                                return err;
+                }
+
                 if (cpu_has_fxsr)
                         err = restore_i387_fxsave(buf);
                 else
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0c37f16b695..c5ef1af8e79 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -385,11 +385,13 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
         if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
                 return memory;
 
-        if (!dev)
+        if (!dev) {
                 dev = &fallback_dev;
+                gfp |= GFP_DMA;
+        }
         dma_mask = dev->coherent_dma_mask;
         if (dma_mask == 0)
-                dma_mask = DMA_32BIT_MASK;
+                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
 
         /* Device not DMA able */
         if (dev->dma_mask == NULL)
@@ -403,7 +405,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
            larger than 16MB and in this case we have a chance of
            finding fitting memory in the next higher zone first. If
            not retry with true GFP_DMA. -AK */
-        if (dma_mask <= DMA_32BIT_MASK)
+        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                 gfp |= GFP_DMA32;
 #endif
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 67e9b4a1e89..ba370dc8685 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -99,15 +99,6 @@ static void mwait_idle(void)
         local_irq_enable();
 }
 
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-        if (force_mwait)
-                return 1;
-        /* Any C1 states supported? */
-        return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
  * to poll the ->work.need_resched flag instead of waiting for the
@@ -119,6 +110,33 @@ static void poll_idle(void)
                 cpu_relax();
 }
 
+/*
+ * mwait selection logic:
+ *
+ * It depends on the CPU. For AMD CPUs that support MWAIT this is
+ * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
+ * then depend on a clock divisor and current Pstate of the core. If
+ * all cores of a processor are in halt state (C1) the processor can
+ * enter the C1E (C1 enhanced) state. If mwait is used this will never
+ * happen.
+ *
+ * idle=mwait overrides this decision and forces the usage of mwait.
+ */
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+        if (force_mwait)
+                return 1;
+
+        if (c->x86_vendor == X86_VENDOR_AMD) {
+                switch(c->x86) {
+                case 0x10:
+                case 0x11:
+                        return 0;
+                }
+        }
+        return 1;
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
         static int selected;
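The mwait_usable() rewrite above drops the old CPUID-leaf-5 query (visible in the removed lines: `c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0`) in favor of an explicit AMD family 0x10/0x11 blacklist, since those parts only reach the C1E package state through HLT. For reference, the query being replaced asked how many C1 sub-states MWAIT advertises; an illustrative user-space sketch of that lookup, using GCC's <cpuid.h> builtin:

/* The removed check asked CPUID leaf 5 for MWAIT's C1 sub-state
 * count; user-space sketch of that query (GCC builtin, illustrative). */
#include <stdio.h>
#include <cpuid.h>

static int mwait_c1_substates(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 if leaf 5 exceeds the CPU's max leaf */
        if (!__get_cpuid(5, &eax, &ebx, &ecx, &edx))
                return 0;
        return (edx >> 4) & 0xf;        /* EDX[7:4]: C1 sub-states */
}

int main(void)
{
        printf("MWAIT C1 sub-states: %d\n", mwait_c1_substates());
        return 0;
}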
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fb03ef380f0..a7835f28293 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1303,6 +1303,9 @@ static const struct user_regset_view user_x86_64_view = {
 #define genregs32_get           genregs_get
 #define genregs32_set           genregs_set
 
+#define user_i387_ia32_struct   user_i387_struct
+#define user32_fxsr_struct      user_fxsr_struct
+
 #endif  /* CONFIG_X86_64 */
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -1315,13 +1318,13 @@ static const struct user_regset x86_32_regsets[] = {
         },
         [REGSET_FP] = {
                 .core_note_type = NT_PRFPREG,
-                .n = sizeof(struct user_i387_struct) / sizeof(u32),
+                .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
                 .size = sizeof(u32), .align = sizeof(u32),
                 .active = fpregs_active, .get = fpregs_get, .set = fpregs_set
         },
         [REGSET_XFP] = {
                 .core_note_type = NT_PRXFPREG,
-                .n = sizeof(struct user_i387_struct) / sizeof(u32),
+                .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
                 .size = sizeof(u32), .align = sizeof(u32),
                 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
         },
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cc6f5eb20b2..6f80b852a19 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -12,6 +12,7 @@
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 
+#ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
 unsigned disabled_cpus __cpuinitdata;
 /* Processor that is doing the boot up */
@@ -23,8 +24,9 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
+#endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
+#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
 /*
  * Copy data used in early init routines from the initial arrays to the
  * per cpu data areas.  These arrays then become expendable and the
@@ -95,7 +97,7 @@ void __init setup_per_cpu_areas(void)
 
         /* Copy section for each CPU (we discard the original) */
         size = PERCPU_ENOUGH_ROOM;
-        printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
+        printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
                size);
 
         for_each_possible_cpu(i) {
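The setup.c printk change is a plain format-string type fix: size is declared unsigned long in this function, so %lu matches its type while %zd expects a (signed) size_t. A minimal illustration with a stand-in value:

/* Minimal illustration of the %zd -> %lu fix: match the conversion
 * specifier to the variable's declared type. */
#include <stdio.h>

int main(void)
{
        unsigned long size = 65536;     /* stand-in for PERCPU_ENOUGH_ROOM */

        /* printf("%zd\n", size);  -- mismatched: %zd wants a signed size_t */
        printf("PERCPU: Allocating %lu bytes of per cpu data\n", size);
        return 0;
}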
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 2283422af79..2c5f8b213e8 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -127,7 +127,12 @@ static struct resource standard_io_resources[] = { {
 }, {
         .name   = "keyboard",
         .start  = 0x0060,
-        .end    = 0x006f,
+        .end    = 0x0060,
+        .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+        .name   = "keyboard",
+        .start  = 0x0064,
+        .end    = 0x0064,
         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
 }, {
         .name   = "dma page reg",
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 22c14e21c97..6dff1286ad8 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -70,6 +70,7 @@
 #include <asm/ds.h>
 #include <asm/topology.h>
 #include <asm/trampoline.h>
+#include <asm/pat.h>
 
 #include <mach_apic.h>
 #ifdef CONFIG_PARAVIRT
@@ -128,7 +129,9 @@ static struct resource standard_io_resources[] = {
                 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
         { .name = "timer1", .start = 0x50, .end = 0x53,
                 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-        { .name = "keyboard", .start = 0x60, .end = 0x6f,
+        { .name = "keyboard", .start = 0x60, .end = 0x60,
+                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+        { .name = "keyboard", .start = 0x64, .end = 0x64,
                 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
         { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
@@ -948,7 +951,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 {
         if (c->x86 == 0x6 && c->x86_model >= 0xf)
-                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 }
 
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
@@ -1063,25 +1066,19 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 
         if (c->extended_cpuid_level >= 0x80000007)
                 c->x86_power = cpuid_edx(0x80000007);
 
-
-        clear_cpu_cap(c, X86_FEATURE_PAT);
-
         switch (c->x86_vendor) {
         case X86_VENDOR_AMD:
                 early_init_amd(c);
-                if (c->x86 >= 0xf && c->x86 <= 0x11)
-                        set_cpu_cap(c, X86_FEATURE_PAT);
                 break;
         case X86_VENDOR_INTEL:
                 early_init_intel(c);
-                if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-                        set_cpu_cap(c, X86_FEATURE_PAT);
                 break;
         case X86_VENDOR_CENTAUR:
                 early_init_centaur(c);
                 break;
         }
+        validate_pat_support(c);
 }
 
 /*
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 8f75893a646..0cb7aadc87c 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -231,7 +231,8 @@ native_smp_call_function_mask(cpumask_t mask,
         wmb();
 
         /* Send a message to other CPUs */
-        if (cpus_equal(mask, allbutself))
+        if (cpus_equal(mask, allbutself) &&
+            cpus_equal(cpu_online_map, cpu_callout_map))
                 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
         else
                 send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
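The smp.c hunk only takes the send_IPI_allbutself() broadcast shortcut when the online and callout masks agree, i.e. when no CPU is still part-way through bring-up; otherwise the IPI is sent to the explicit mask. A toy single-word bitmask model of that decision (hypothetical; the kernel uses cpumask_t, not a plain word):

/* Toy model of the broadcast decision above: one bit per CPU in a
 * single word (hypothetical; illustrative only). */
#include <stdio.h>

typedef unsigned long cpumask;

static int can_broadcast(cpumask mask, cpumask allbutself,
                         cpumask online, cpumask callout)
{
        /* shortcut only if the target is "everyone but me" AND every
         * online CPU has completed callout (none mid-bringup) */
        return mask == allbutself && online == callout;
}

int main(void)
{
        cpumask online = 0xf, callout = 0x7;    /* CPU3 still booting */

        printf("%d\n", can_broadcast(0xe, 0xe, online, callout)); /* 0 */
        printf("%d\n", can_broadcast(0xe, 0xe, online, online));  /* 1 */
        return 0;
}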
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6b087ab6cd8..38988491c62 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -86,6 +86,7 @@ void *x86_bios_cpu_apicid_early_ptr;
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
+static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -326,6 +327,12 @@ static void __cpuinit start_secondary(void *unused)
                 enable_8259A_irq(0);
         }
 
+#ifdef CONFIG_X86_32
+        while (low_mappings)
+                cpu_relax();
+        __flush_tlb_all();
+#endif
+
         /* This must be done before setting cpu_online_map */
         set_cpu_sibling_map(raw_smp_processor_id());
         wmb();
@@ -1040,14 +1047,20 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 #ifdef CONFIG_X86_32
         /* init low mem mapping */
         clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+                        min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
         flush_tlb_all();
-#endif
+        low_mappings = 1;
 
         err = do_boot_cpu(apicid, cpu);
-        if (err < 0) {
+
+        zap_low_mappings();
+        low_mappings = 0;
+#else
+        err = do_boot_cpu(apicid, cpu);
+#endif
+        if (err) {
                 Dprintk("do_boot_cpu failed %d\n", err);
-                return err;
+                return -EIO;
         }
 
         /*
@@ -1259,9 +1272,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
                 setup_ioapic_dest();
 #endif
         check_nmi_watchdog();
-#ifdef CONFIG_X86_32
-        zap_low_mappings();
-#endif
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 58882f9f263..f6c05d0410f 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -2,6 +2,7 @@
    All C exports should go in the respective C files. */
 
 #include <linux/module.h>
+#include <net/checksum.h>
 #include <linux/smp.h>
 
 #include <asm/processor.h>
@@ -29,6 +30,8 @@ EXPORT_SYMBOL(__copy_from_user_inatomic);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
+EXPORT_SYMBOL(csum_partial);
+
 /*
  * Export string functions. We normally rely on gcc builtin for most of these,
  * but gcc sometimes decides not to inline them.
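The csum_partial export matches the new <net/checksum.h> include: the x86-64 routine is an arch-specific implementation of the IP-style partial ones'-complement sum, and modules that use the checksum helpers need the symbol at link time. For illustration only, a plain-C reference of the folded RFC 1071 ones'-complement sum — note the kernel's csum_partial() returns an unfolded 32-bit partial sum and differs in byte handling, so this is a sketch of the underlying arithmetic, not the kernel routine verbatim:

/* Plain-C reference of the RFC 1071 ones'-complement sum; a sketch of
 * the arithmetic only, not the kernel's csum_partial(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_ref(const uint8_t *buf, int len, uint32_t sum)
{
        while (len > 1) {               /* sum 16-bit big-endian words */
                sum += (uint32_t)(buf[0] << 8 | buf[1]);
                buf += 2;
                len -= 2;
        }
        if (len)                        /* pad a trailing odd byte with zero */
                sum += (uint32_t)(buf[0] << 8);
        while (sum >> 16)               /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

int main(void)
{
        const uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c };

        printf("0x%04x\n", (unsigned)csum_ref(data, sizeof(data), 0)); /* 0x453c */
        return 0;
}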