author      H. Peter Anvin <hpa@linux.intel.com>    2012-05-08 21:22:36 +0300
committer   H. Peter Anvin <hpa@linux.intel.com>    2012-05-08 11:48:03 -0700
commit      968ff9ee56f1e3ed4ff4a6d10185865dc77d8f7e (patch)
tree        203601668b2e0ee01b9acdc19ef4732c46c0fe72 /arch/x86/realmode
parent      056a43a6d3ab903a798d8ee4435ad67d6fccc3e6 (diff)
x86, realmode: Remove indirect jumps in trampoline_32 and wakeup_asm
Remove indirect jumps in trampoline_32.S and the 32-bit part of
wakeup_asm.S. There exist systems which are known to do weird
things if an SMI comes in right after a mode switch, and the
safest way to deal with it is to always follow the mode switch with a
simple absolute far jump. In the 64-bit code we then do a register
indirect near jump; follow that pattern for the 32-bit code.
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1336501366-28617-14-git-send-email-jarkko.sakkinen@intel.com
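For illustration, the two jump styles contrasted by this patch look roughly like
the following in GNU as (a minimal sketch, not code from the patch itself; the
PM_CS selector value, the label names, and the data layout are illustrative):

        .set    PM_CS, 0x10             # hypothetical protected-mode code selector

        .code16
old_style:
        movw    $1, %dx                 # protected mode (PE) bit
        lmsw    %dx                     # into protected mode
        ljmpl   *(pm_entry)             # indirect far jump: 32-bit offset and
                                        # 16-bit selector are read from memory

new_style:
        movl    pm_target, %eax         # stash the variable destination first
        movw    $1, %dx                 # protected mode (PE) bit
        lmsw    %dx                     # into protected mode
        ljmpl   $PM_CS, $pm_stub        # absolute far jump with an immediate
                                        # selector:offset, fixed at build time

        .section ".text32","ax"
        .code32
pm_stub:
        jmp     *%eax                   # register-indirect near jump to the
                                        # real 32-bit entry point

        .data
pm_entry:
        .long   0                       # offset   (old style, patched at runtime)
        .word   PM_CS, 0                # selector (old style)
pm_target:
        .long   0                       # 32-bit entry point (new style, patched at runtime)

The contrast shows the point of the change: the far jump that immediately
follows the mode switch now carries a fixed selector:offset in the instruction
itself, rather than fetching it indirectly from memory, and the variable
destination is only consumed later by a plain near jump out of %eax, matching
what the 64-bit trampoline already does.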
Diffstat (limited to 'arch/x86/realmode')
-rw-r--r--  arch/x86/realmode/rm/trampoline_32.S      | 22
-rw-r--r--  arch/x86/realmode/rm/wakeup/wakeup_asm.S  |  8
2 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
index 1f9e3316f73..1315ef48dbf 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -47,24 +47,29 @@ trampoline_data:
 	cli			# We should be safe anyway
 
+	movl	startup_32_smp, %eax	# where we need to go
+
 	movl	$0xA5A5A5A5, trampoline_status
 				# write marker for master knows we're running
 
-	/* GDT tables in non default location kernel can be beyond 16MB and
+	/*
+	 * GDT tables in non default location kernel can be beyond 16MB and
 	 * lgdt will not be able to load the address as in real mode default
 	 * operand size is 16bit. Use lgdtl instead to force operand size
 	 * to 32 bit.
 	 */
-
 	lidtl	boot_idt_descr		# load idt with 0, 0
 	lgdtl	boot_gdt_descr		# load gdt with whatever is appropriate
 
-	xor	%ax, %ax
-	inc	%ax			# protected mode (PE) bit
-	lmsw	%ax			# into protected mode
+	movw	$1, %dx			# protected mode (PE) bit
+	lmsw	%dx			# into protected mode
 
-	# flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
-	ljmpl	*(startup_32_smp)
+	ljmpl	$__BOOT_CS, $pa_startup_32
+
+	.section ".text32","ax"
+	.code32
+ENTRY(startup_32)			# note: also used from wakeup_asm.S
+	jmp	*%eax
 
 	.data
 	.globl startup_32_smp, boot_gdt, trampoline_status
@@ -82,5 +87,4 @@ trampoline_status:
 	.long	0
 
 startup_32_smp:
-	.long 0x00000000
-	.word __BOOT_CS, 0
+	.long	0
diff --git a/arch/x86/realmode/rm/wakeup/wakeup_asm.S b/arch/x86/realmode/rm/wakeup/wakeup_asm.S
index b61126cb599..4c5c5f2bfbe 100644
--- a/arch/x86/realmode/rm/wakeup/wakeup_asm.S
+++ b/arch/x86/realmode/rm/wakeup/wakeup_asm.S
@@ -124,9 +124,11 @@ wakeup_start:
 	lgdtl	pmode_gdt
 
 	/* This really couldn't... */
-	movl	pmode_cr0, %eax
-	movl	%eax, %cr0
-	ljmpl	*pmode_entry
+	movl	pmode_entry, %eax
+	movl	pmode_cr0, %ecx
+	movl	%ecx, %cr0
+	ljmpl	$__KERNEL_CS, $pa_startup_32
+	/* -> jmp *%eax in trampoline_32.S */
 #else
 	jmp	trampoline_data
 #endif