Diffstat (limited to 'arch/x86_64/kernel/setup64.c')
-rw-r--r-- | arch/x86_64/kernel/setup64.c | 292
1 file changed, 292 insertions, 0 deletions
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
new file mode 100644
index 00000000000..b5305b04bc4
--- /dev/null
+++ b/arch/x86_64/kernel/setup64.c
@@ -0,0 +1,292 @@
+/*
+ * X86-64 specific CPU setup.
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
+ * See setup.c for older changelog.
+ * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/bitops.h>
+#include <asm/pda.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/i387.h>
+#include <asm/percpu.h>
+#include <asm/mtrr.h>
+#include <asm/proto.h>
+#include <asm/mman.h>
+#include <asm/numa.h>
+
+char x86_boot_params[2048] __initdata = {0,};
+
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
+
+extern struct task_struct init_task;
+
+extern unsigned char __per_cpu_start[], __per_cpu_end[];
+
+extern struct desc_ptr cpu_gdt_descr[];
+struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
+
+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
+
+unsigned long __supported_pte_mask = ~0UL;
+static int do_not_nx __initdata = 0;
+
+/* noexec=on|off
+Control non executable mappings for 64bit processes.
+
+on	Enable (default)
+off	Disable
+*/
+int __init nonx_setup(char *str)
+{
+	if (!strncmp(str, "on", 2)) {
+		__supported_pte_mask |= _PAGE_NX;
+		do_not_nx = 0;
+	} else if (!strncmp(str, "off", 3)) {
+		do_not_nx = 1;
+		__supported_pte_mask &= ~_PAGE_NX;
+	}
+	return 0;
+}
+__setup("noexec=", nonx_setup);	/* parsed early actually */
+
+int force_personality32 = READ_IMPLIES_EXEC;
+
+/* noexec32=on|off
+Control non executable heap for 32bit processes.
+To control the stack too use noexec=off
+
+on	PROT_READ does not imply PROT_EXEC for 32bit processes
+off	PROT_READ implies PROT_EXEC (default)
+*/
+static int __init nonx32_setup(char *str)
+{
+	if (!strcmp(str, "on"))
+		force_personality32 &= ~READ_IMPLIES_EXEC;
+	else if (!strcmp(str, "off"))
+		force_personality32 |= READ_IMPLIES_EXEC;
+	return 0;
+}
+__setup("noexec32=", nonx32_setup);
+
+/*
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
+ */
+void __init setup_per_cpu_areas(void)
+{
+	int i;
+	unsigned long size;
+
+	/* Copy section for each CPU (we discard the original) */
+	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+#ifdef CONFIG_MODULES
+	if (size < PERCPU_ENOUGH_ROOM)
+		size = PERCPU_ENOUGH_ROOM;
+#endif
+
+	for (i = 0; i < NR_CPUS; i++) {
+		unsigned char *ptr;
+
+		if (!NODE_DATA(cpu_to_node(i))) {
+			printk("cpu with no node %d, num_online_nodes %d\n",
+			       i, num_online_nodes());
+			ptr = alloc_bootmem(size);
+		} else {
+			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
+		}
+		if (!ptr)
+			panic("Cannot allocate cpu data for CPU %d\n", i);
+		cpu_pda[i].data_offset = ptr - __per_cpu_start;
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+	}
+}
+
+void pda_init(int cpu)
+{
+	struct x8664_pda *pda = &cpu_pda[cpu];
+
+	/* Set up data that may be needed in __get_free_pages early */
+	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+	wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
+
+	pda->me = pda;
+	pda->cpunumber = cpu;
+	pda->irqcount = -1;
+	pda->kernelstack =
+		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
+	pda->active_mm = &init_mm;
+	pda->mmu_state = 0;
+
+	if (cpu == 0) {
+		/* others are initialized in smpboot.c */
+		pda->pcurrent = &init_task;
+		pda->irqstackptr = boot_cpu_stack;
+	} else {
+		pda->irqstackptr = (char *)
+			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+		if (!pda->irqstackptr)
+			panic("cannot allocate irqstack for cpu %d", cpu);
+	}
+
+	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+
+	pda->irqstackptr += IRQSTACKSIZE-64;
+}
+
+char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ]
+__attribute__((section(".bss.page_aligned")));
+
+/* May not be marked __init: used by software suspend */
+void syscall_init(void)
+{
+	/*
+	 * LSTAR and STAR live in a bit strange symbiosis.
+	 * They both write to the same internal register. STAR allows to set CS/DS
+	 * but only a 32bit target. LSTAR sets the 64bit rip.
+	 */
+	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
+	wrmsrl(MSR_LSTAR, system_call);
+
+#ifdef CONFIG_IA32_EMULATION
+	syscall32_cpu_init();
+#endif
+
+	/* Flags to clear on syscall */
+	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
+}
+
+void __init check_efer(void)
+{
+	unsigned long efer;
+
+	rdmsrl(MSR_EFER, efer);
+	if (!(efer & EFER_NX) || do_not_nx) {
+		__supported_pte_mask &= ~_PAGE_NX;
+	}
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ * A lot of state is already set up in PDA init.
+ */
+void __init cpu_init(void)
+{
+#ifdef CONFIG_SMP
+	int cpu = stack_smp_processor_id();
+#else
+	int cpu = smp_processor_id();
+#endif
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
+	unsigned long v;
+	char *estacks = NULL;
+	struct task_struct *me;
+	int i;
+
+	/* CPU 0 is initialised in head64.c */
+	if (cpu != 0) {
+		pda_init(cpu);
+	} else
+		estacks = boot_exception_stacks;
+
+	me = current;
+
+	if (cpu_test_and_set(cpu, cpu_initialized))
+		panic("CPU#%d already initialized!\n", cpu);
+
+	printk("Initializing CPU#%d\n", cpu);
+
+	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+
+	/*
+	 * Initialize the per-CPU GDT with the boot GDT,
+	 * and set up the GDT descriptor:
+	 */
+	if (cpu) {
+		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
+	}
+
+	cpu_gdt_descr[cpu].size = GDT_SIZE;
+	cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
+	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
+	asm volatile("lidt %0" :: "m" (idt_descr));
+
+	memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
+
+	/*
+	 * Delete NT
+	 */
+
+	asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
+
+	syscall_init();
+
+	wrmsrl(MSR_FS_BASE, 0);
+	wrmsrl(MSR_KERNEL_GS_BASE, 0);
+	barrier();
+
+	check_efer();
+
+	/*
+	 * set up and load the per-CPU TSS
+	 */
+	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+		if (cpu) {
+			estacks = (char *)__get_free_pages(GFP_ATOMIC,
+						   EXCEPTION_STACK_ORDER);
+			if (!estacks)
+				panic("Cannot allocate exception stack %ld %d\n",
+				      v, cpu);
+		}
+		estacks += EXCEPTION_STKSZ;
+		t->ist[v] = (unsigned long)estacks;
+	}
+
+	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	/*
+	 * <= is required because the CPU will access up to
+	 * 8 bits beyond the end of the IO permission bitmap.
+	 */
+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
+		t->io_bitmap[i] = ~0UL;
+
+	atomic_inc(&init_mm.mm_count);
+	me->active_mm = &init_mm;
+	if (me->mm)
+		BUG();
+	enter_lazy_tlb(&init_mm, me);
+
+	set_tss_desc(cpu, t);
+	load_TR_desc();
+	load_LDT(&init_mm.context);
+
+	/*
+	 * Clear all 6 debug registers:
+	 */
+
+	set_debug(0UL, 0);
+	set_debug(0UL, 1);
+	set_debug(0UL, 2);
+	set_debug(0UL, 3);
+	set_debug(0UL, 6);
+	set_debug(0UL, 7);
+
+	fpu_init();
+}
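
The noexec= handling and check_efer() in the patch both work by clearing _PAGE_NX out of __supported_pte_mask, which then filters the NX bit out of every page-table entry built afterwards. A minimal user-space sketch of that masking idea follows; the constants and make_pte() are illustrative stand-ins, not the kernel's pgprot machinery.

/*
 * Sketch: how a supported-PTE-bits mask silently strips NX.
 * _PAGE_NX really is bit 63 on x86-64; the rest is simplified.
 */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1ULL << 0)
#define _PAGE_RW	(1ULL << 1)
#define _PAGE_NX	(1ULL << 63)

static uint64_t supported_pte_mask = ~0ULL;	/* like __supported_pte_mask */

/* Filter raw protection bits through the mask, as the kernel's
 * PTE construction effectively does. */
static uint64_t make_pte(uint64_t raw)
{
	return raw & supported_pte_mask;
}

int main(void)
{
	uint64_t prot = _PAGE_PRESENT | _PAGE_RW | _PAGE_NX;

	printf("with NX:    %#018llx\n", (unsigned long long)make_pte(prot));

	supported_pte_mask &= ~_PAGE_NX;	/* what check_efer() does */
	printf("without NX: %#018llx\n", (unsigned long long)make_pte(prot));
	return 0;
}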
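
setup_per_cpu_areas() gives every CPU a private copy of the per-CPU data section and records only a per-CPU byte offset (cpu_pda[i].data_offset); a per-CPU access is then the variable's link-time address plus that offset. A small stand-alone sketch of the offset arithmetic, with hypothetical names and plain malloc() standing in for the bootmem allocator:

/*
 * Sketch: per-CPU data as "master section + per-CPU offset".
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define NCPUS 4

static long master_section[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* "__per_cpu_start" */
static long data_offset[NCPUS];					/* like pda->data_offset */

/* Turn a pointer into the master section into CPU i's private copy. */
static long *per_cpu_ptr_sketch(long *master_addr, int cpu)
{
	return (long *)((char *)master_addr + data_offset[cpu]);
}

int main(void)
{
	for (int i = 0; i < NCPUS; i++) {
		long *copy = malloc(sizeof(master_section));
		memcpy(copy, master_section, sizeof(master_section));
		/* mirrors cpu_pda[i].data_offset = ptr - __per_cpu_start */
		data_offset[i] = (char *)copy - (char *)master_section;
	}

	/* Each CPU now sees its own instance of "master_section[3]". */
	*per_cpu_ptr_sketch(&master_section[3], 2) = 42;
	printf("cpu2: %ld, cpu0: %ld\n",
	       *per_cpu_ptr_sketch(&master_section[3], 2),
	       *per_cpu_ptr_sketch(&master_section[3], 0));
	return 0;
}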
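
The MSR_STAR/MSR_LSTAR comment in syscall_init() is terse: STAR holds the segment selector bases the CPU uses on SYSCALL/SYSRET, while LSTAR holds the 64-bit entry RIP. The sketch below (user-space C, with illustrative selector values rather than the real __USER32_CS/__KERNEL_CS) shows how the packed STAR value is laid out and how SYSCALL derives CS and SS from it:

/*
 * Sketch: the bit packing behind
 *   wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
 * Bits 63..48 are the SYSRET selector base, bits 47..32 the SYSCALL base.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t user32_cs = 0x23;	/* hypothetical 32-bit user CS */
	uint64_t kernel_cs = 0x10;	/* hypothetical kernel CS */

	uint64_t star = (user32_cs << 48) | (kernel_cs << 32);

	/* On SYSCALL the CPU loads CS from STAR[47:32] and SS from the
	 * next GDT slot (CS + 8); SYSRET uses the STAR[63:48] base. */
	uint64_t syscall_cs = (star >> 32) & 0xffff;

	printf("STAR       = %#018llx\n", (unsigned long long)star);
	printf("SYSCALL CS = %#llx, SS = %#llx\n",
	       (unsigned long long)syscall_cs,
	       (unsigned long long)(syscall_cs + 8));
	return 0;
}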