From 5dfcea629a08b4684a019cd0cb59d0c9129a6c02 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Sun, 5 Jun 2011 13:50:23 -0400
Subject: x86-64: Fill unused parts of the vsyscall page with 0xcc

Jumping to 0x00 might do something depending on the following bytes.
Jumping to 0xcc is a trap.  So fill the unused parts of the vsyscall
page with 0xcc to make it useless for exploits to jump there.

Signed-off-by: Andy Lutomirski
Cc: Jesper Juhl
Cc: Borislav Petkov
Cc: Linus Torvalds
Cc: Arjan van de Ven
Cc: Jan Beulich
Cc: richard -rw- weinberger
Cc: Mikael Pettersson
Cc: Andi Kleen
Cc: Brian Gerst
Cc: Louis Rilling
Cc: Valdis.Kletnieks@vt.edu
Cc: pageexec@freemail.hu
Link: http://lkml.kernel.org/r/ed54bfcfbe50a9070d20ec1edbe0d149e22a4568.1307292171.git.luto@mit.edu
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/vmlinux.lds.S | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 4f90082fd64..80174719910 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -166,22 +166,20 @@ SECTIONS
 	__vsyscall_0 = .;
 
 	. = VSYSCALL_ADDR;
-	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
+	.vsyscall : AT(VLOAD(.vsyscall)) {
 		*(.vsyscall_0)
-	} :user
 
-	. = ALIGN(L1_CACHE_BYTES);
-	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
+		. = ALIGN(L1_CACHE_BYTES);
 		*(.vsyscall_fn)
-	}
 
-	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
+		. = 1024;
 		*(.vsyscall_1)
-	}
-	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
+
+		. = 2048;
 		*(.vsyscall_2)
-	}
 
+		. = 4096; /* Pad the whole page. */
+	} :user =0xcc
 	. = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE);
 
 #undef VSYSCALL_ADDR
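
A quick aside, not part of the patch itself: 0xcc is the one-byte x86 int3
opcode, so a stray jump into the padding raises a breakpoint trap instead of
decoding whatever bytes happen to follow, and the GNU ld fill expression
("=0xcc" after the closing brace of the output section) is what writes that
byte into every otherwise unassigned location in the section.  The sketch
below is a hypothetical userspace demo (the file name int3_demo.c is made up
here) showing the trap behaviour on Linux/x86-64 built with gcc:

/* int3_demo.c - hypothetical demo, not kernel code: the byte 0xcc decodes
 * as int3, and the kernel delivers the resulting breakpoint trap to
 * userspace as SIGTRAP.  Build with: gcc -o int3_demo int3_demo.c
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handle_sigtrap(int sig)
{
	static const char msg[] = "SIGTRAP: the 0xcc byte trapped as expected\n";

	(void)sig;
	(void)write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	signal(SIGTRAP, handle_sigtrap);

	/* Emit a raw 0xcc byte in the instruction stream; the CPU decodes
	 * it as int3 and traps before reaching the code below. */
	__asm__ __volatile__(".byte 0xcc");

	printf("int3 did not trap\n");
	return 1;
}

The fill byte serves the same purpose inside the vsyscall page: any bytes not
covered by the .vsyscall_* input sections now trap rather than letting a
mis-targeted jump fall through into neighbouring code.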