From 70c70d97809c3cdb8ff04f38ee3718c5385a2a4d Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Thu, 26 Aug 2010 15:08:35 -0700
Subject: ARM: SECCOMP support

Signed-off-by: Nicolas Pitre
---
 arch/arm/kernel/entry-common.S | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7885722bdf4..0385a8207b6 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -295,7 +295,6 @@ ENTRY(vector_swi)
 
 	get_thread_info tsk
 	adr	tbl, sys_call_table		@ load syscall table pointer
-	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
 
 #if defined(CONFIG_OABI_COMPAT)
 	/*
@@ -312,8 +311,20 @@
 	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
 
+	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
-	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
+
+#ifdef CONFIG_SECCOMP
+	tst	r10, #_TIF_SECCOMP
+	beq	1f
+	mov	r0, scno
+	bl	__secure_computing
+	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
+1:
+#endif
+
+	tst	r10, #_TIF_SYSCALL_TRACE	@ are we tracing syscalls?
 	bne	__sys_trace
 
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
--
cgit v1.2.3-70-g09d2

From ec706dab290c486837d4a825870ab052bf200279 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Thu, 26 Aug 2010 23:10:50 -0400
Subject: ARM: add a vma entry for the user accessible vector page

The kernel makes the high vector page visible to user space. This page
contains (amongst others) small code segments that can be executed in
user space. Make this page visible through ptrace and /proc/<pid>/mem in
order to let gdb perform code parsing needed for proper unwinding.

For example, the ERESTART_RESTARTBLOCK handler actually has a stack
frame -- it returns to a PC value stored on the user's stack. To unwind
after a "sleep" system call was interrupted twice, GDB would have to
recognize this situation and understand that stack frame layout -- which
it currently cannot do.

We could fix this by hard-coding addresses in the vector page range into
GDB, but that isn't really portable as not all of those addresses are
guaranteed to remain stable across kernel releases. And having the gdb
process make an exception for this page and get content from its own
address space for it looks strange, and it is not future proof either.

Being located above PAGE_OFFSET, this vma cannot be deleted by
user space code.
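As an illustration of the access path this enables (not part of the original patch): a debugger that is already ptrace-attached to a task can read the vector page through /proc/<pid>/mem like any other mapping. A minimal user-space sketch follows; the dump_vector_words() helper is a made-up name, error handling is kept to the bare minimum, and on kernels of this era the caller must be the task's tracer for the read to be allowed.

#define _FILE_OFFSET_BITS 64	/* 0xffff0000 must fit in off_t on 32-bit */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

/* Dump the first words of the ARM vector page of a ptrace-attached task. */
static int dump_vector_words(pid_t pid)
{
	char path[64];
	uint32_t words[4];
	int i, fd;

	snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	/* The user-visible vector page is always mapped at 0xffff0000. */
	if (pread(fd, words, sizeof(words), 0xffff0000ULL) != (ssize_t)sizeof(words)) {
		close(fd);
		return -1;
	}

	for (i = 0; i < 4; i++)
		printf("0x%08lx: 0x%08x\n", 0xffff0000UL + i * 4, words[i]);

	close(fd);
	return 0;
}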
Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/elf.h         |  4 ++++
 arch/arm/include/asm/mmu_context.h | 29 ++++++++++++++++++++++++++++-
 arch/arm/kernel/process.c          | 21 +++++++++++++++++++++
 3 files changed, 53 insertions(+), 1 deletion(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 5747a8baa41..8bb66bca2e3 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -127,4 +127,8 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
+extern int vectors_user_mapping(void);
+#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547..71605d9f8e4 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,7 +18,6 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
-#include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);
 
@@ -134,4 +133,32 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
+/*
+ * We are inserting a "fake" vma for the user-accessible vector page so
+ * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
+ * But we also want to remove it before the generic code gets to see it
+ * during process exit or the unmapping of it would cause total havoc.
+ * (the macro is used as remove_vma() is static to mm/mmap.c)
+ */
+#define arch_exit_mmap(mm) \
+do { \
+	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
+	if (high_vma) { \
+		BUG_ON(high_vma->vm_next);  /* it should be last */ \
+		if (high_vma->vm_prev) \
+			high_vma->vm_prev->vm_next = NULL; \
+		else \
+			mm->mmap = NULL; \
+		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
+		mm->mmap_cache = NULL; \
+		mm->map_count--; \
+		remove_vma(high_vma); \
+	} \
+} while (0)
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
 #endif
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 401e38be1f7..66ac9c92620 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -458,3 +458,24 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	unsigned long range_end = mm->brk + 0x02000000;
 	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
+
+/*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Let's declare a mapping
+ * for it so it is visible through ptrace and /proc/<pid>/mem.
+ */
+
+int vectors_user_mapping(void)
+{
+	struct mm_struct *mm = current->mm;
+	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
+				       VM_READ | VM_EXEC |
+				       VM_MAYREAD | VM_MAYEXEC |
+				       VM_ALWAYSDUMP | VM_RESERVED,
+				       NULL);
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+}
--
cgit v1.2.3-70-g09d2

From ccdf2e1bca8a45eaf89eb142dbed3551886413fe Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 27 Sep 2010 18:12:12 +0100
Subject: ARM: 6412/1: kprobes-decode: add support for MOVW instruction

The MOVW instruction moves a 16-bit immediate into the bottom halfword
of the destination register. This patch ensures that kprobes leaves the
16-bit immediate intact, rather than assume a 12-bit immediate and mask
out the upper 4 bits.
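For context on the encoding described above (an illustrative aside, not part of the patch): MOVW stores its 16-bit immediate split across imm4 (bits 19:16) and imm12 (bits 11:0) of the instruction word -- the same bit positions that ordinary data-processing instructions use for Rn and their 12-bit immediate. A minimal C sketch of the field extraction; the word 0xe3012345 is simply one possible encoding of "movw r2, #0x1345".

#include <stdint.h>
#include <stdio.h>

/* MOVW (A1): cond 0011 0000 imm4 Rd imm12, with imm16 = imm4:imm12 */
static uint16_t movw_imm16(uint32_t insn)
{
	uint32_t imm4  = (insn >> 16) & 0x000f;	/* bits 19:16 */
	uint32_t imm12 = insn & 0x0fff;		/* bits 11:0  */

	return (uint16_t)((imm4 << 12) | imm12);
}

int main(void)
{
	printf("imm16 = 0x%04x\n", movw_imm16(0xe3012345));	/* prints 0x1345 */
	return 0;
}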
Acked-by: Nicolas Pitre
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/kprobes-decode.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8bccbfa693f..2c1f0050c9c 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1162,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
 	/*
 	 * MSR   : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
-	 * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
+	 * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
 	 * ALU op with S bit and Rd == 15 :
 	 *	   cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
 	 */
-	if ((insn & 0x0f900000) == 0x03200000 ||	/* MSR & Undef */
+	if ((insn & 0x0fb00000) == 0x03200000 ||	/* MSR */
+	    (insn & 0x0ff00000) == 0x03400000 ||	/* Undef */
 	    (insn & 0x0e10f000) == 0x0210f000)		/* ALU s-bit, R15 */
 		return INSN_REJECTED;
 
@@ -1177,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 	 * *S (bit 20) updates condition codes
 	 * ADC/SBC/RSC reads the C flag
 	 */
-	insn &= 0xfff00fff;	/* Rn = r0, Rd = r0 */
+	insn &= 0xffff0fff;	/* Rd = r0 */
 	asi->insn[0] = insn;
 	asi->insn_handler = (insn & (1 << 20)) ?	/* S-bit */
 		emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
--
cgit v1.2.3-70-g09d2
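To make the one-line mask change above concrete (again an illustrative aside, not kernel code), the following sketch applies the old and the new kprobes mask to the example MOVW word used earlier. The old mask clears bits 19:16 as if they held Rn and so corrupts the immediate; the new mask clears only Rd. It prints 0x0345 for the old mask and 0x1345 for the new one.

#include <stdint.h>
#include <stdio.h>

/* imm16 = imm4:imm12, as in the previous sketch */
static uint16_t movw_imm16(uint32_t insn)
{
	return (uint16_t)((((insn >> 16) & 0xf) << 12) | (insn & 0xfff));
}

int main(void)
{
	uint32_t insn = 0xe3012345;	/* movw r2, #0x1345 */

	/* Old mask: clears bits 19:16 (assumed Rn) and 15:12 (Rd) */
	printf("0xfff00fff -> imm16 = 0x%04x\n", movw_imm16(insn & 0xfff00fff));

	/* New mask: clears only Rd (bits 15:12), so imm4 is preserved */
	printf("0xffff0fff -> imm16 = 0x%04x\n", movw_imm16(insn & 0xffff0fff));

	return 0;
}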