author		Ingo Molnar <mingo@elte.hu>	2010-10-30 10:43:08 +0200
committer	Ingo Molnar <mingo@elte.hu>	2010-10-30 10:43:08 +0200
commit		169ed55bd30305b933f52bfab32a58671d44ab68 (patch)
tree		32e280957474f458901abfce16fa2a1687ef7497 /arch/x86/kernel
parent		3d7851b3cdd43a734e5cc4c643fd886ab28ad4d5 (diff)
parent		45f81b1c96d9793e47ce925d257ea693ce0b193e (diff)
Merge branch 'tip/perf/jump-label-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/urgent
Diffstat (limited to 'arch/x86/kernel')
38 files changed, 180 insertions, 244 deletions
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index e1252074ea4..69fd72aa559 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -13,6 +13,10 @@
 #include <asm/segment.h>
 #include <asm/desc.h>
 
+#ifdef CONFIG_X86_32
+#include <asm/pgtable.h>
+#endif
+
 #include "realmode/wakeup.h"
 #include "sleep.h"
 
@@ -91,7 +95,7 @@ int acpi_save_state_mem(void)
 #ifndef CONFIG_64BIT
 	header->pmode_entry = (u32)&wakeup_pmode_return;
-	header->pmode_cr3 = (u32)(swsusp_pg_dir - __PAGE_OFFSET);
+	header->pmode_cr3 = (u32)__pa(&initial_page_table);
 	saved_magic = 0x12345678;
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a36bb90aef5..0b30214282a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -644,65 +644,26 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
 
-unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+#ifdef CONFIG_X86_64
+unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+#else
+unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+#endif
 
 void __init arch_init_ideal_nop5(void)
 {
-	extern const unsigned char ftrace_test_p6nop[];
-	extern const unsigned char ftrace_test_nop5[];
-	extern const unsigned char ftrace_test_jmp[];
-	int faulted = 0;
-
 	/*
-	 * There is no good nop for all x86 archs.
-	 * We will default to using the P6_NOP5, but first we
-	 * will test to make sure that the nop will actually
-	 * work on this CPU. If it faults, we will then
-	 * go to a lesser efficient 5 byte nop. If that fails
-	 * we then just use a jmp as our nop. This isn't the most
-	 * efficient nop, but we can not use a multi part nop
-	 * since we would then risk being preempted in the middle
-	 * of that nop, and if we enabled tracing then, it might
-	 * cause a system crash.
+	 * There is no good nop for all x86 archs. This selection
+	 * algorithm should be unified with the one in find_nop_table(),
+	 * but this should be good enough for now.
 	 *
-	 * TODO: check the cpuid to determine the best nop.
+	 * For cases other than the ones below, use the safe (as in
+	 * always functional) defaults above.
 	 */
-	asm volatile (
-		"ftrace_test_jmp:"
-		"jmp ftrace_test_p6nop\n"
-		"nop\n"
-		"nop\n"
-		"nop\n"	/* 2 byte jmp + 3 bytes */
-		"ftrace_test_p6nop:"
-		P6_NOP5
-		"jmp 1f\n"
-		"ftrace_test_nop5:"
-		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"1:"
-		".section .fixup, \"ax\"\n"
-		"2:	movl $1, %0\n"
-		"	jmp ftrace_test_nop5\n"
-		"3:	movl $2, %0\n"
-		"	jmp 1b\n"
-		".previous\n"
-		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
-		_ASM_EXTABLE(ftrace_test_nop5, 3b)
-		: "=r"(faulted) : "0" (faulted));
-
-	switch (faulted) {
-	case 0:
-		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
-		break;
-	case 1:
-		pr_info("converting mcount calls to 66 66 66 66 90\n");
-		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
-		break;
-	case 2:
-		pr_info("converting mcount calls to jmp . + 5\n");
-		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
-		break;
-	}
-
+#ifdef CONFIG_X86_64
+	/* Don't use these on 32 bits due to broken virtualizers */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		memcpy(ideal_nop5, p6_nops[5], 5);
+#endif
 }
 #endif
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 4c9c67bf09b..0e4f24c2a74 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -189,8 +189,8 @@
  * Intel Order Number 241704-001. Microsoft Part Number 781-110-X01.
  *
  * [This document is available free from Intel by calling 800.628.8686 (fax
- * 916.356.6100) or 800.548.4725; or via anonymous ftp from
- * ftp://ftp.intel.com/pub/IAL/software_specs/apmv11.doc. It is also
+ * 916.356.6100) or 800.548.4725; or from
+ * http://www.microsoft.com/whdc/archive/amp_12.mspx  It is also
  * available from Microsoft by calling 206.882.8080.]
  *
  * APM 1.2 Reference:
@@ -1926,6 +1926,7 @@ static const struct file_operations apm_bios_fops = {
 	.unlocked_ioctl	= do_ioctl,
 	.open		= do_open,
 	.release	= do_release,
+	.llseek		= noop_llseek,
 };
 
 static struct miscdevice apm_device = {
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index dfdbf640389..1a4088dda37 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -99,9 +99,7 @@ void foo(void)
 
 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
 	DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
-	DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
-	DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
-	DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
+	DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
 
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index cd8da247dda..a2baafb2fe6 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 		per_cpu(acfreq_data, policy->cpu) = NULL;
 		acpi_processor_unregister_performance(data->acpi_data,
 						      policy->cpu);
+		kfree(data->freq_table);
 		kfree(data);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 733093d6043..141abebc451 100644
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -393,7 +393,7 @@ static struct cpufreq_driver nforce2_driver = {
  * Detects nForce2 A2 and C1 stepping
  *
  */
-static unsigned int nforce2_detect_chipset(void)
+static int nforce2_detect_chipset(void)
 {
 	nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
 				     PCI_DEVICE_ID_NVIDIA_NFORCE2,
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index fc09f142d94..d9f51367666 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -35,7 +35,7 @@ static unsigned int longrun_low_freq, longrun_high_freq;
  * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
  * and MSR_TMTA_LONGRUN_CTRL
  */
-static void __init longrun_get_policy(struct cpufreq_policy *policy)
+static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
 {
 	u32 msr_lo, msr_hi;
 
@@ -165,7 +165,7 @@ static unsigned int longrun_get(unsigned int cpu)
  * TMTA rules:
  * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
 */
-static unsigned int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
 						      unsigned int *high_freq)
 {
 	u32 msr_lo, msr_hi;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 695f17731e2..d16c2c53d6b 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -284,9 +284,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	/* Don't do the funky fallback heuristics the AMD version
 	   employs for now. */
 	node = apicid_to_node[apicid];
-	if (node == NUMA_NO_NODE)
-		node = first_node(node_online_map);
-	else if (!node_online(node)) {
+	if (node == NUMA_NO_NODE || !node_online(node)) {
 		/* reuse the value from init_cpu_to_node() */
 		node = cpu_to_node(cpu);
 	}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 12cd823c8d0..17ad0336621 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -327,6 +327,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
 	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 8a85dd1b1aa..1e8d66c1336 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -192,6 +192,7 @@ static const struct file_operations severities_coverage_fops = {
 	.release	= seq_release,
 	.read		= seq_read,
 	.write		= severities_coverage_write,
+	.llseek		= seq_lseek,
 };
 
 static int __init severities_debugfs_init(void)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ed41562909f..7a35b72d7c0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1665,6 +1665,7 @@ struct file_operations mce_chrdev_ops = {
 	.read			= mce_read,
 	.poll			= mce_poll,
 	.unlocked_ioctl		= mce_ioctl,
+	.llseek			= no_llseek,
 };
 EXPORT_SYMBOL_GPL(mce_chrdev_ops);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a333bf9189f..ed6310183ef 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -49,7 +49,6 @@ static unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long offset, addr = (unsigned long)from;
-	int type = in_nmi() ? KM_NMI : KM_IRQ0;
 	unsigned long size, len = 0;
 	struct page *page;
 	void *map;
@@ -63,9 +62,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 		offset = addr & (PAGE_SIZE - 1);
 		size = min(PAGE_SIZE - offset, n - len);
 
-		map = kmap_atomic(page, type);
+		map = kmap_atomic(page);
 		memcpy(to, map+offset, size);
-		kunmap_atomic(map, type);
+		kunmap_atomic(map);
 		put_page(page);
 
 		len += size;
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 67414550c3c..d5cd13945d5 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!is_crashed_pfn_valid(pfn))
 		return -EFAULT;
 
-	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+	vaddr = kmap_atomic_pfn(pfn);
 
 	if (!userbuf) {
 		memcpy(buf, (vaddr + offset), csize);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 0f6376ffa2d..1bc7f75a5bd 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (kstack_end(stack))
 			break;
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %08lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %08lx", *stack++);
 		touch_nmi_watchdog();
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 57a21f11c79..6a340485249 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (stack >= irq_stack && stack <= irq_stack_end) {
 			if (stack == irq_stack_end) {
 				stack = (unsigned long *) (irq_stack_end[-1]);
-				printk(" <EOI> ");
+				printk(KERN_CONT " <EOI> ");
 			}
 		} else {
 		if (((long) stack & (THREAD_SIZE-1)) == 0)
 			break;
 		}
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %016lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
 	preempt_enable();
 
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 9fb188d7bc7..59e175e8959 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -382,20 +382,20 @@ sysenter_past_esp:
 	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
 	 * we immediately enable interrupts at that point anyway.
 	 */
-	pushl_cfi $(__USER_DS)
+	pushl_cfi $__USER_DS
 	/*CFI_REL_OFFSET ss, 0*/
 	pushl_cfi %ebp
 	CFI_REL_OFFSET esp, 0
 	pushfl_cfi
 	orl $X86_EFLAGS_IF, (%esp)
-	pushl_cfi $(__USER_CS)
+	pushl_cfi $__USER_CS
 	/*CFI_REL_OFFSET cs, 0*/
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
 	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
 	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
 	 */
-	pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+	pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp)
 	CFI_REL_OFFSET eip, 0
 
 	pushl_cfi %eax
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a7ae7fd1010..fe2690d71c0 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -963,22 +963,10 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \
 	x86_platform_ipi smp_x86_platform_ipi
 
 #ifdef CONFIG_SMP
-apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
-	invalidate_interrupt0 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
-	invalidate_interrupt1 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
-	invalidate_interrupt2 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
-	invalidate_interrupt3 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
-	invalidate_interrupt4 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
-	invalidate_interrupt5 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
-	invalidate_interrupt6 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
-	invalidate_interrupt7 smp_invalidate_interrupt
+.irpc idx, "01234567"
+apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
+	invalidate_interrupt\idx smp_invalidate_interrupt
+.endr
 #endif
 
 apicinterrupt THRESHOLD_APIC_VECTOR \
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 9a6ca239217..763310165fa 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -18,6 +18,7 @@
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/bios_ebda.h>
+#include <asm/tlbflush.h>
 
 static void __init i386_default_early_setup(void)
 {
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fa8c1b8e09f..bcece91dd31 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -183,13 +183,12 @@ default_entry:
 #ifdef CONFIG_X86_PAE
 
 	/*
-	 * In PAE mode swapper_pg_dir is statically defined to contain enough
-	 * entries to cover the VMSPLIT option (that is the top 1, 2 or 3
-	 * entries). The identity mapping is handled by pointing two PGD
-	 * entries to the first kernel PMD.
+	 * In PAE mode initial_page_table is statically defined to contain
+	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
+	 * entries). The identity mapping is handled by pointing two PGD entries
+	 * to the first kernel PMD.
 	 *
-	 * Note the upper half of each PMD or PTE are always zero at
-	 * this stage.
+	 * Note the upper half of each PMD or PTE are always zero at this stage.
 	 */
 
 #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
@@ -197,7 +196,7 @@ default_entry:
 	xorl %ebx,%ebx				/* %ebx is kept at zero */
 
 	movl $pa(__brk_base), %edi
-	movl $pa(swapper_pg_pmd), %edx
+	movl $pa(initial_pg_pmd), %edx
 	movl $PTE_IDENT_ATTR, %eax
 10:
 	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
@@ -226,14 +225,14 @@ default_entry:
 	movl %eax, pa(max_pfn_mapped)
 
 	/* Do early initialization of the fixmap area */
-	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
-	movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
 #else	/* Not PAE */
 
 page_pde_offset = (__PAGE_OFFSET >> 20);
 
 	movl $pa(__brk_base), %edi
-	movl $pa(swapper_pg_dir), %edx
+	movl $pa(initial_page_table), %edx
 	movl $PTE_IDENT_ATTR, %eax
 10:
 	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
@@ -257,8 +256,8 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	movl %eax, pa(max_pfn_mapped)
 
 	/* Do early initialization of the fixmap area */
-	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
-	movl %eax,pa(swapper_pg_dir+0xffc)
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+	movl %eax,pa(initial_page_table+0xffc)
 #endif
 	jmp 3f
 /*
@@ -334,7 +333,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-	movl pa(initial_page_table), %eax
+	movl $pa(initial_page_table), %eax
 	movl %eax,%cr3		/* set the page table pointer.. */
 	movl %cr0,%eax
 	orl  $X86_CR0_PG,%eax
@@ -614,8 +613,6 @@ ignore_int:
 .align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
-ENTRY(initial_page_table)
-	.long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -623,20 +620,18 @@ ENTRY(initial_page_table)
 __PAGE_ALIGNED_BSS
 	.align PAGE_SIZE_asm
 #ifdef CONFIG_X86_PAE
-swapper_pg_pmd:
+initial_pg_pmd:
 	.fill 1024*KPMDS,4,0
 #else
-ENTRY(swapper_pg_dir)
+ENTRY(initial_page_table)
 	.fill 1024,4,0
 #endif
-swapper_pg_fixmap:
+initial_pg_fixmap:
 	.fill 1024,4,0
-#ifdef CONFIG_X86_TRAMPOLINE
-ENTRY(trampoline_pg_dir)
-	.fill 1024,4,0
-#endif
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
+ENTRY(swapper_pg_dir)
+	.fill 1024,4,0
 
 /*
  * This starts the data section.
@@ -645,20 +640,20 @@ ENTRY(empty_zero_page)
 __PAGE_ALIGNED_DATA
 	/* Page-aligned for the benefit of paravirt? */
 	.align PAGE_SIZE_asm
-ENTRY(swapper_pg_dir)
-	.long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
+ENTRY(initial_page_table)
+	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
-	.long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
-	.long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
-	.long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x2000),0
+	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
+	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
+	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
 	.long 0,0
-	.long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
-	.long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
+	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
+	.long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
 	.long 0,0
 	.long 0,0
-	.long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
+	.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
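The KPMDS define in the head_32.S hunk above computes how many of the top-level page-directory entries the kernel mapping needs: each PAE PGD entry covers 1 GiB, so the count follows directly from __PAGE_OFFSET. A standalone user-space check of that arithmetic (not kernel code; the VMSPLIT values below are the usual 3G/2G/1G split addresses, stated here as assumptions):

#include <stdint.h>
#include <stdio.h>

static unsigned kpmds(uint32_t page_offset)
{
	/* (((-__PAGE_OFFSET) >> 30) & 3): 1GiB per PAE PGD entry */
	return ((-page_offset) >> 30) & 3;
}

int main(void)
{
	printf("VMSPLIT_3G (0xC0000000): KPMDS=%u\n", kpmds(0xC0000000u)); /* 1 */
	printf("VMSPLIT_2G (0x80000000): KPMDS=%u\n", kpmds(0x80000000u)); /* 2 */
	printf("VMSPLIT_1G (0x40000000): KPMDS=%u\n", kpmds(0x40000000u)); /* 3 */
	return 0;
}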
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index efaf906daf9..ae03cab4352 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -380,44 +380,35 @@ static int hpet_next_event(unsigned long delta,
 			   struct clock_event_device *evt, int timer)
 {
 	u32 cnt;
+	s32 res;
 
 	cnt = hpet_readl(HPET_COUNTER);
 	cnt += (u32) delta;
 	hpet_writel(cnt, HPET_Tn_CMP(timer));
 
 	/*
-	 * We need to read back the CMP register on certain HPET
-	 * implementations (ATI chipsets) which seem to delay the
-	 * transfer of the compare register into the internal compare
-	 * logic. With small deltas this might actually be too late as
-	 * the counter could already be higher than the compare value
-	 * at that point and we would wait for the next hpet interrupt
-	 * forever. We found out that reading the CMP register back
-	 * forces the transfer so we can rely on the comparison with
-	 * the counter register below. If the read back from the
-	 * compare register does not match the value we programmed
-	 * then we might have a real hardware problem. We can not do
-	 * much about it here, but at least alert the user/admin with
-	 * a prominent warning.
-	 *
-	 * An erratum on some chipsets (ICH9,..), results in
-	 * comparator read immediately following a write returning old
-	 * value. Workaround for this is to read this value second
-	 * time, when first read returns old value.
-	 *
-	 * In fact the write to the comparator register is delayed up
-	 * to two HPET cycles so the workaround we tried to restrict
-	 * the readback to those known to be borked ATI chipsets
-	 * failed miserably. So we give up on optimizations forever
-	 * and penalize all HPET incarnations unconditionally.
+	 * HPETs are a complete disaster. The compare register is
+	 * based on a equal comparison and neither provides a less
+	 * than or equal functionality (which would require to take
+	 * the wraparound into account) nor a simple count down event
+	 * mode. Further the write to the comparator register is
+	 * delayed internally up to two HPET clock cycles in certain
+	 * chipsets (ATI, ICH9,10). We worked around that by reading
+	 * back the compare register, but that required another
+	 * workaround for ICH9,10 chips where the first readout after
+	 * write can return the old stale value. We already have a
+	 * minimum delta of 5us enforced, but a NMI or SMI hitting
+	 * between the counter readout and the comparator write can
+	 * move us behind that point easily. Now instead of reading
+	 * the compare register back several times, we make the ETIME
+	 * decision based on the following: Return ETIME if the
+	 * counter value after the write is less than 8 HPET cycles
+	 * away from the event or if the counter is already ahead of
+	 * the event.
 	 */
-	if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
-		if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
-			printk_once(KERN_WARNING
-				"hpet: compare register read back failed.\n");
-	}
+	res = (s32)(cnt - hpet_readl(HPET_COUNTER));
 
-	return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
+	return res < 8 ? -ETIME : 0;
 }
 
 static void hpet_legacy_set_mode(enum clock_event_mode mode,
@@ -722,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
 		init_completion(&work.complete);
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 10709f29d16..64668dbf00a 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -49,21 +49,17 @@ static inline int check_stack_overflow(void) { return 0; }
 static inline void print_stack_overflow(void) { }
 #endif
 
-#ifdef CONFIG_4KSTACKS
 /*
  * per-CPU IRQ handling contexts (thread information and stack)
 */
 union irq_ctx {
 	struct thread_info	tinfo;
 	u32			stack[THREAD_SIZE/sizeof(u32)];
-} __attribute__((aligned(PAGE_SIZE)));
+} __attribute__((aligned(THREAD_SIZE)));
 
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
-static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);
-
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl %%ebx,%%esp \n"
@@ -129,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
 	if (per_cpu(hardirq_ctx, cpu))
 		return;
 
-	irqctx = &per_cpu(hardirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -138,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
 
 	per_cpu(hardirq_ctx, cpu) = irqctx;
 
-	irqctx = &per_cpu(softirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -151,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
 	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
-void irq_ctx_exit(int cpu)
-{
-	per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
@@ -187,11 +178,6 @@ asmlinkage void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-#else
-static inline int
-execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
-#endif
-
 bool handle_irq(unsigned irq, struct pt_regs *regs)
 {
 	struct irq_desc *desc;
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 8afd9f321f1..90fcf62854b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -78,6 +78,7 @@ static int setup_data_open(struct inode *inode, struct file *file)
 static const struct file_operations fops_setup_data = {
 	.read		= setup_data_read,
 	.open		= setup_data_open,
+	.llseek		= default_llseek,
 };
 
 static int __init
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 852b81967a3..d81cfebb848 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -477,8 +477,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 				raw_smp_processor_id());
 		}
 
-		kgdb_correct_hw_break();
-
 		return 0;
 	}
 
@@ -621,7 +619,12 @@ int kgdb_arch_init(void)
 static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
 		struct perf_sample_data *data, struct pt_regs *regs)
 {
-	kgdb_ll_trap(DIE_DEBUG, "debug", regs, 0, 0, SIGTRAP);
+	struct task_struct *tsk = current;
+	int i;
+
+	for (i = 0; i < 4; i++)
+		if (breakinfo[i].enabled)
+			tsk->thread.debugreg6 |= (DR_TRAP0 << i);
 }
 
 void kgdb_arch_late(void)
@@ -644,7 +647,7 @@ void kgdb_arch_late(void)
 		if (breakinfo[i].pev)
 			continue;
 		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
-		if (IS_ERR(breakinfo[i].pev)) {
+		if (IS_ERR((void * __force)breakinfo[i].pev)) {
 			printk(KERN_ERR "kgdb: Could not allocate hw"
 			       "breakpoints\nDisabling the kernel debugger\n");
 			breakinfo[i].pev = NULL;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index eb9b76c716c..ca43ce31a19 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,13 +128,15 @@ static struct clocksource kvm_clock = {
 static int kvm_register_clock(char *txt)
 {
 	int cpu = smp_processor_id();
-	int low, high;
+	int low, high, ret;
+
 	low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
 	high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
+	ret = native_write_msr_safe(msr_kvm_system_time, low, high);
 	printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
 	       cpu, high, low, txt);
 
-	return native_write_msr_safe(msr_kvm_system_time, low, high);
+	return ret;
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index fa6551d36c1..1cca374a2ba 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -12,7 +12,7 @@
  *	Software Developer's Manual
  *	Order Number 253668 or free download from:
  *
- *	http://developer.intel.com/design/pentium4/manuals/253668.htm
+ *	http://developer.intel.com/Assets/PDF/manual/253668.pdf
 *
 *	For more information, go to http://www.urbanmyth.org/microcode
 *
@@ -232,6 +232,7 @@ static const struct file_operations microcode_fops = {
 	.owner			= THIS_MODULE,
 	.write			= microcode_write,
 	.open			= microcode_open,
+	.llseek		= no_llseek,
 };
 
 static struct miscdevice microcode_dev = {
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 356170262a9..dcb65cc0a05 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -12,7 +12,7 @@
  *	Software Developer's Manual
  *	Order Number 253668 or free download from:
  *
- *	http://developer.intel.com/design/pentium4/manuals/253668.htm
+ *	http://developer.intel.com/Assets/PDF/manual/253668.pdf
 *
 *	For more information, go to http://www.urbanmyth.org/microcode
 *
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 70c4872cd8a..45892dc4b72 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -801,7 +801,8 @@ void ptrace_disable(struct task_struct *child)
 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
 #endif
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
 {
 	int ret;
 	unsigned long __user *datap = (unsigned long __user *)data;
@@ -812,8 +813,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		unsigned long tmp;
 
 		ret = -EIO;
-		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-		    addr >= sizeof(struct user))
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
 			break;
 
 		tmp = 0;  /* Default return condition */
@@ -830,8 +830,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
 		ret = -EIO;
-		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-		    addr >= sizeof(struct user))
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
 			break;
 
 		if (addr < sizeof(struct user_regs_struct))
@@ -888,17 +887,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 	case PTRACE_GET_THREAD_AREA:
-		if (addr < 0)
+		if ((int) addr < 0)
 			return -EIO;
 		ret = do_get_thread_area(child, addr,
-					(struct user_desc __user *) data);
+					(struct user_desc __user *)data);
 		break;
 
 	case PTRACE_SET_THREAD_AREA:
-		if (addr < 0)
+		if ((int) addr < 0)
 			return -EIO;
 		ret = do_set_thread_area(child, addr,
-					(struct user_desc __user *) data, 0);
+					(struct user_desc __user *)data, 0);
 		break;
 #endif
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 239427ca02a..bab3b9e6f66 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -82,7 +82,8 @@ static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
 static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
 {
 	u64 delta = native_read_tsc() - shadow->tsc_timestamp;
-	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+	return pvclock_scale_delta(delta, shadow->tsc_to_nsec_mul,
+				   shadow->tsc_shift);
 }
 
 /*
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 939b9e98245..8bbe8c56916 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -344,6 +344,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
 			 vt8237_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
 			 vt8237_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
+			 vt8237_force_enable_hpet);
 
 static void ati_force_hpet_resume(void)
 {
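The hpet.c hunk earlier in this diff replaces the comparator readback with a single signed-distance test. A standalone sketch (not the kernel function) of why (s32)(cmp - counter) stays correct across the 32-bit counter wraparound; HPET_MIN_CYCLES is a name made up here for the 8-cycle margin the comment describes:

#include <stdint.h>
#include <stdio.h>

#define HPET_MIN_CYCLES 8

static int event_too_close(uint32_t cmp, uint32_t counter)
{
	/* unsigned subtraction is modulo 2^32; reinterpreting the
	 * difference as signed gives the distance to the event,
	 * negative when the counter has already passed it */
	int32_t res = (int32_t)(cmp - counter);

	return res < HPET_MIN_CYCLES;	/* the -ETIME case in the patch */
}

int main(void)
{
	/* event 100 cycles ahead: fine */
	printf("%d\n", event_too_close(0x00000100, 0x0000009c));	/* 0 */
	/* counter already past the compare value: report ETIME */
	printf("%d\n", event_too_close(0x00000100, 0x00000101));	/* 1 */
	/* across the wrap: event 0x20 cycles ahead despite cmp < counter */
	printf("%d\n", event_too_close(0x00000010, 0xfffffff0));	/* 0 */
	return 0;
}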
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 7a4cf14223b..c495aa8d481 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -371,16 +371,10 @@ void machine_real_restart(const unsigned char *code, int length)
 	CMOS_WRITE(0x00, 0x8f);
 	spin_unlock(&rtc_lock);
 
-	/* Remap the kernel at virtual address zero, as well as offset zero
-	   from the kernel segment.  This assumes the kernel segment starts at
-	   virtual address PAGE_OFFSET. */
-	memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-		sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
-
 	/*
-	 * Use `swapper_pg_dir' as our page directory.
+	 * Switch back to the initial page table.
 	 */
-	load_cr3(swapper_pg_dir);
+	load_cr3(initial_page_table);
 
 	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
 	   this on booting to tell it to "Bypass memory test (also warm
@@ -641,7 +635,7 @@ void native_machine_shutdown(void)
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
 	 */
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	lapic_shutdown();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 420e6419785..95a32746fbf 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -700,6 +700,17 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();
+
+	/*
+	 * copy kernel address range established so far and switch
+	 * to the proper swapper page table
+	 */
+	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+			initial_page_table + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+
+	load_cr3(swapper_pg_dir);
+	__flush_tlb_all();
 #else
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
@@ -985,7 +996,12 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
-	setup_trampoline_page_table();
+#ifdef CONFIG_X86_32
+	/* sync back kernel address range */
+	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+#endif
 
 	tboot_probe();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210945d..513deac7228 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
-	unsigned long wait;
+	unsigned long timeout;
 
 	if (reboot_force)
 		return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
 	if (num_online_cpus() > 1) {
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-		/* Don't wait longer than a second */
-		wait = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && wait--)
+		/*
+		 * Don't wait longer than a second if the caller
+		 * didn't ask us to wait.
+		 */
+		timeout = USEC_PER_SEC;
+		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.smp_send_stop		= native_smp_send_stop,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index dfb50890b5b..083e99d1b7d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -299,22 +299,16 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
+	cpu_init();
+	preempt_disable();
+	smp_callin();
 
 #ifdef CONFIG_X86_32
-	/*
-	 * Switch away from the trampoline page-table
-	 *
-	 * Do this before cpu_init() because it needs to access per-cpu
-	 * data which may not be mapped in the trampoline page-table.
-	 */
+	/* switch away from the initial page table */
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 #endif
 
-	cpu_init();
-	preempt_disable();
-	smp_callin();
-
 	/* otherwise gcc will move up smp_processor_id before the cpu_init */
 	barrier();
 	/*
@@ -753,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 
-	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
 
@@ -785,7 +779,6 @@ do_rest:
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
-	initial_page_table = __pa(&trampoline_pg_dir);
 #else
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
@@ -934,7 +927,6 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
 	err = do_boot_cpu(apicid, cpu);
-
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
@@ -1381,7 +1373,6 @@ void play_dead_common(void)
 {
 	idle_task_exit();
 	reset_lazy_tlbstate();
-	irq_ctx_exit(raw_smp_processor_id());
 	c1e_remove_cpu(raw_smp_processor_id());
 
 	mb();
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 312ef029281..20ea20a39e2 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -1001,10 +1001,10 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 static ssize_t tunables_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
 {
-	char buf[300];
+	char *buf;
 	int ret;
 
-	ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
+	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
 		"max_bau_concurrent plugged_delay plugsb4reset",
 		"timeoutsb4reset ipi_reset_limit complete_threshold",
 		"congested_response_us congested_reps congested_period",
@@ -1012,7 +1012,12 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
 		timeoutsb4reset, ipi_reset_limit, complete_threshold,
 		congested_response_us, congested_reps, congested_period);
 
-	return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+	kfree(buf);
+	return ret;
 }
 
 /*
@@ -1285,6 +1290,7 @@ static const struct file_operations tunables_fops = {
 	.open		= tunables_open,
 	.read		= tunables_read,
 	.write		= tunables_write,
+	.llseek		= default_llseek,
 };
 
 static int __init uv_ptc_init(void)
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index 4c3da5674e6..a375616d77f 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -38,19 +38,3 @@ unsigned long __trampinit setup_trampoline(void)
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
 }
-
-void __init setup_trampoline_page_table(void)
-{
-#ifdef CONFIG_X86_32
-	/* Copy kernel address range */
-	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			KERNEL_PGD_PTRS);
-
-	/* Initialize low mappings */
-	clone_pgd_range(trampoline_pg_dir,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min_t(unsigned long, KERNEL_PGD_PTRS,
-			      KERNEL_PGD_BOUNDARY));
-#endif
-}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d43968503dd..cb838ca42c9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -575,6 +575,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	if (regs->flags & X86_VM_MASK) {
 		handle_vm86_trap((struct kernel_vm86_regs *) regs,
 				error_code, 1);
+		preempt_conditional_cli(regs);
 		return;
 	}
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5ffb5622f79..61fb9851962 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -551,8 +551,14 @@ cannot_handle:
 int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
 	if (VMPI.is_vm86pus) {
-		if ((trapno == 3) || (trapno == 1))
-			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
+		if ((trapno == 3) || (trapno == 1)) {
+			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+			/* setting this flag forces the code in entry_32.S to
+			   call save_v86_state() and change the stack pointer
+			   to KVM86->regs32 */
+			set_thread_flag(TIF_IRET);
+			return 0;
+		}
 		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
 		return 0;
 	}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 38e2b67807e..e03530aebfd 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -301,7 +301,7 @@ SECTIONS
 	}
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-	PERCPU(PAGE_SIZE)
+	PERCPU(THREAD_SIZE)
 #endif
 
 	. = ALIGN(PAGE_SIZE);
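The setup.c hunks above keep initial_page_table and swapper_pg_dir in sync with clone_pgd_range(), which in the kernel is essentially a memcpy() over a window of top-level page-directory entries. A standalone model of that copy, with illustrative values (non-PAE 32-bit with a 3G/1G split: 1024 entries, kernel mapping from entry 768) rather than the real kernel constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PGD_ENTRIES		1024
#define KERNEL_PGD_BOUNDARY	768
#define KERNEL_PGD_PTRS		(PGD_ENTRIES - KERNEL_PGD_BOUNDARY)

typedef uint32_t pgd_t;	/* stand-in for the kernel's pgd_t */

static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
	/* the kernel helper is a memcpy over 'count' PGD entries */
	memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
	static pgd_t initial_page_table[PGD_ENTRIES];
	static pgd_t swapper_pg_dir[PGD_ENTRIES];
	int i;

	/* pretend early boot populated the kernel half of the initial table */
	for (i = KERNEL_PGD_BOUNDARY; i < PGD_ENTRIES; i++)
		initial_page_table[i] = 0x1000u * i;

	/* the setup_arch() step: copy the kernel range across, then switch */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	printf("entry 768 matches: %d\n",
	       swapper_pg_dir[768] == initial_page_table[768]);
	return 0;
}

After this copy both top-level tables agree on the kernel mapping, which is why the later "sync back" hunk can run in the opposite direction without losing anything.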