From 22f975f4ffa707ea24507f6899bb9f5a1ff034bc Mon Sep 17 00:00:00 2001
From: Nikola Valerjev
Date: Sat, 10 Dec 2005 11:59:15 +0000
Subject: [ARM] 3200/1: Singlestep over ARM BX and BLX instructions using ptrace fix

Patch from Nikola Valerjev

Single-stepping an application using ptrace() fails over the ARM
instructions BX and BLX.

Steps to reproduce:

Compile and link the following files:

main.c
-----
void foo();

int main() {
    foo();
    return 0;
}

foo.s
-----
.text
.globl foo
foo:
    BX LR

Using ptrace(), run to main() and start single-stepping.  Single-stepping
over the "BX LR" instruction won't transfer control back to main(), but
instead runs the code to completion.

The problem seems to be in the function get_branch_address() in
arch/arm/kernel/ptrace.c: the function doesn't recognize BX and BLX
instructions as branches.

BX and BLX can be used to switch from ARM to Thumb mode when the target
address has the low bit set, but they are also perfectly legal in
ARM-only mode.  Although other parts of the kernel seem to indicate that
only ARM mode is accepted (and not Thumb), many compilers generate BX and
BLX instructions even when producing ARM-only code.

Signed-off-by: Nikola Valerjev
Signed-off-by: Russell King
---
 arch/arm/kernel/ptrace.c | 9 +++++++++
 1 file changed, 9 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 9a340e790da..2b84f78d7b0 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -242,6 +242,15 @@ get_branch_address(struct task_struct *child, unsigned long pc, unsigned long in
 		 */
 		long aluop1, aluop2, ccbit;
 
+		if ((insn & 0x0fffffd0) == 0x012fff10) {
+			/*
+			 * bx or blx
+			 */
+			alt = get_user_reg(child, insn & 15);
+			break;
+		}
+
+
 		if ((insn & 0xf000) != 0xf000)
 			break;
--
cgit v1.2.3-70-g09d2
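A quick illustration of the check added above: the mask 0x0fffffd0 clears the
condition field (bits 31-28), bit 5 (which distinguishes BX from BLX) and the
Rm field (bits 3-0), so the single comparison against 0x012fff10 matches both
register-form branches and nothing else (BXJ, for instance, differs in the
preserved bits).  The sketch below is standalone user-space C, not kernel
code; the file name, helper name, sample encodings and printf driver are
illustrative only.

bx_blx_decode.c
-----
#include <stdio.h>
#include <stdint.h>

/* Return the branch target register number, or -1 if insn is not BX/BLX Rm. */
static int bx_blx_target_reg(uint32_t insn)
{
	/*
	 * BX  Rm : cond 0001 0010 1111 1111 1111 0001 Rm
	 * BLX Rm : cond 0001 0010 1111 1111 1111 0011 Rm
	 *
	 * Masking with 0x0fffffd0 discards the condition field, bit 5
	 * (BX vs. BLX) and the Rm field, so one comparison covers both
	 * encodings, exactly as in get_branch_address() above.
	 */
	if ((insn & 0x0fffffd0) == 0x012fff10)
		return insn & 15;	/* Rm holds the target address */
	return -1;
}

int main(void)
{
	uint32_t bx_lr  = 0xe12fff1e;	/* BX  lr                    */
	uint32_t blx_r3 = 0xe12fff33;	/* BLX r3                    */
	uint32_t mov_r0 = 0xe3a00000;	/* MOV r0, #0 (not a branch) */

	printf("BX  lr -> r%d\n", bx_blx_target_reg(bx_lr));	/* r14 */
	printf("BLX r3 -> r%d\n", bx_blx_target_reg(blx_r3));	/* r3  */
	printf("MOV    -> %d\n",  bx_blx_target_reg(mov_r0));	/* -1  */
	return 0;
}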
From c2e2611425a956d25d2948c5d95d3848c4db1257 Mon Sep 17 00:00:00 2001
From: Daniel Jacobowitz
Date: Wed, 14 Dec 2005 22:04:22 +0000
Subject: [ARM] 3205/1: Handle new EABI relocations when loading kernel modules.

Patch from Daniel Jacobowitz

Handle new EABI relocations when loading kernel modules.  This is
necessary for CONFIG_AEABI kernels, and also for some broken (since
fixed) old ABI toolchains.

Signed-off-by: Daniel Jacobowitz
Signed-off-by: Russell King
---
 arch/arm/kernel/module.c | 2 ++
 include/asm-arm/elf.h    | 2 ++
 2 files changed, 4 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6055e1427ba..055bf5d2889 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -101,6 +101,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			break;
 
 		case R_ARM_PC24:
+		case R_ARM_CALL:
+		case R_ARM_JUMP24:
 			offset = (*(u32 *)loc & 0x00ffffff) << 2;
 			if (offset & 0x02000000)
 				offset -= 0x04000000;

diff --git a/include/asm-arm/elf.h b/include/asm-arm/elf.h
index 7da97a93754..2d44b42d184 100644
--- a/include/asm-arm/elf.h
+++ b/include/asm-arm/elf.h
@@ -22,6 +22,8 @@ typedef unsigned long elf_freg_t[3];
 #define R_ARM_NONE	0
 #define R_ARM_PC24	1
 #define R_ARM_ABS32	2
+#define R_ARM_CALL	28
+#define R_ARM_JUMP24	29
 
 #define ELF_NGREG	(sizeof (struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
--
cgit v1.2.3-70-g09d2

From 567bd98017d9c9f2ac1c148ddc78c062e8abd398 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 17 Dec 2005 15:25:42 +0000
Subject: [ARM] Fix sys_sendto and sys_recvfrom 6-arg syscalls

Rather than providing more wrappers for 6-arg syscalls, arrange for
them to be supported as standard.  This just means that we always
store the 6th argument on the stack, rather than in the wrappers.
This means we eliminate the wrappers for:

 * sys_futex
 * sys_arm_fadvise64_64
 * sys_mbind
 * sys_ipc

Signed-off-by: Russell King
---
 arch/arm/kernel/calls.S        |  8 ++++----
 arch/arm/kernel/entry-common.S | 20 ++------------------
 2 files changed, 6 insertions(+), 22 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 2ad4aa2a153..55076a75e5b 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -131,7 +131,7 @@ __syscall_start:
 		.long	sys_wait4
 /* 115 */	.long	sys_swapoff
 		.long	sys_sysinfo
-		.long	sys_ipc_wrapper
+		.long	sys_ipc
 		.long	sys_fsync
 		.long	sys_sigreturn_wrapper
 /* 120 */	.long	sys_clone_wrapper
@@ -254,7 +254,7 @@ __syscall_start:
 		.long	sys_fremovexattr
 		.long	sys_tkill
 		.long	sys_sendfile64
-/* 240 */	.long	sys_futex_wrapper
+/* 240 */	.long	sys_futex
 		.long	sys_sched_setaffinity
 		.long	sys_sched_getaffinity
 		.long	sys_io_setup
@@ -284,7 +284,7 @@ __syscall_start:
 		.long	sys_fstatfs64
 		.long	sys_tgkill
 		.long	sys_utimes
-/* 270 */	.long	sys_arm_fadvise64_64_wrapper
+/* 270 */	.long	sys_arm_fadvise64_64
 		.long	sys_pciconfig_iobase
 		.long	sys_pciconfig_read
 		.long	sys_pciconfig_write
@@ -333,7 +333,7 @@ __syscall_start:
 		.long	sys_inotify_init
 		.long	sys_inotify_add_watch
 		.long	sys_inotify_rm_watch
-		.long	sys_mbind_wrapper
+		.long	sys_mbind
 /* 320 */	.long	sys_get_mempolicy
 		.long	sys_set_mempolicy
 __syscall_end:

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f7f18307523..e2b42997ad3 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -145,7 +145,7 @@ ENTRY(vector_swi)
 #endif
 	enable_irq
 
-	str	r4, [sp, #-S_OFF]!		@ push fifth arg
+	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
 
 	get_thread_info tsk
 	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
@@ -204,7 +204,7 @@ ENTRY(sys_call_table)
  * Special system call wrappers
  */
 @ r0 = syscall number
-@ r5 = syscall table
+@ r8 = syscall table
 	.type	sys_syscall, #function
 sys_syscall:
 		eor	scno, r0, #__NR_SYSCALL_BASE
@@ -255,22 +255,6 @@ sys_sigaltstack_wrapper:
 		ldr	r2, [sp, #S_OFF + S_SP]
 		b	do_sigaltstack
 
-sys_futex_wrapper:
-		str	r5, [sp, #4]		@ push sixth arg
-		b	sys_futex
-
-sys_arm_fadvise64_64_wrapper:
-		str	r5, [sp, #4]		@ push r5 to stack
-		b	sys_arm_fadvise64_64
-
-sys_mbind_wrapper:
-		str	r5, [sp, #4]
-		b	sys_mbind
-
-sys_ipc_wrapper:
-		str	r5, [sp, #4]		@ push sixth arg
-		b	sys_ipc
-
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
--
cgit v1.2.3-70-g09d2
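Since vector_swi now saves both r4 and r5 on the stack, any six-argument
syscall reaches its C handler without a dedicated assembler wrapper,
sys_futex included.  The user-space sketch below exercises one such call
through the generic syscall(2) interface; the choice of futex and the dummy
argument values are illustrative only.  FUTEX_WAIT with a non-matching
expected value returns immediately with EAGAIN, so the program does not
block, but all six arguments still travel the full path.

futex_6args.c
-----
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

int main(void)
{
	int word = 0;

	/*
	 * futex() takes six arguments.  The expected value 1 does not match
	 * *uaddr (which is 0), so the kernel returns -EAGAIN right away;
	 * uaddr2 and val3 are dummies but are still passed as args 5 and 6.
	 */
	long ret = syscall(SYS_futex, &word, FUTEX_WAIT, 1 /* expected */,
			   NULL /* timeout */, NULL /* uaddr2 */, 0 /* val3 */);

	if (ret == -1)
		printf("futex: %s (EAGAIN expected)\n", strerror(errno));
	return 0;
}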
From 7c612bfd4ed3064fd48a4877a114c8186547367b Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 19 Dec 2005 22:20:51 +0000
Subject: [ARM] 3210/1: add missing memory barrier helper for NPTL support

Patch from Nicolas Pitre

Strictly speaking, the NPTL kernel helpers are required for pre-ARMv6
only.  They are available on ARMv6+ as well for obvious compatibility
reasons.

However there are cases where extra memory barriers are needed when
using an SMP ARMv6 machine but not on pre-ARMv6.

This patch adds a memory barrier kernel helper that glibc can use as
needed for pre-ARMv6 binaries to be forward compatible with an SMP
kernel on ARMv6, as well as the necessary dmb instructions to the
cmpxchg helper.

Signed-off-by: Nicolas Pitre
Acked-by: Daniel Jacobowitz
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 49 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d9fb819bf7c..2a8d27e18fa 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -611,6 +611,47 @@ ENTRY(__switch_to)
 	.globl	__kuser_helper_start
 __kuser_helper_start:
 
+/*
+ * Reference prototype:
+ *
+ *	void __kernel_memory_barrier(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	none
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef void (__kernel_dmb_t)(void);
+ *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
+ *
+ * Apply any needed memory barrier to preserve consistency with data modified
+ * manually and __kuser_cmpxchg usage.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_dmb() \
+ *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
+ *	        : : : "lr","cc" )
+ */
+
+__kuser_memory_barrier:				@ 0xffff0fa0
+
+#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+	mov	pc, lr
+
+	.align	5
+
 /*
  * Reference prototype:
  *
@@ -642,6 +683,8 @@ __kuser_helper_start:
  * The C flag is also set if *ptr was changed to allow for assembly
  * optimization in the calling code.
  *
+ * Note: this routine already includes memory barriers as needed.
+ *
  * For example, a user space atomic_add implementation could look like this:
  *
  * #define atomic_add(ptr, val) \
@@ -698,10 +741,16 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 
 #else
 
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
 	rsbs	r0, r3, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	mov	pc, lr
 
 #endif
--
cgit v1.2.3-70-g09d2
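For completeness, here is roughly how a pre-ARMv6 user-space binary would
consume the helpers above, following the usage examples embedded in the
kernel comments.  The entry addresses (0xffff0fa0 for the memory barrier,
0xffff0fc0 for cmpxchg) and the prototypes come from the kuser helper ABI;
the atomic_add() and unlock_release() wrappers and the small driver are
illustrative only, and the code runs only on ARM Linux kernels that map the
vector page.

kuser_helpers.c
-----
#include <stdio.h>

/* Prototypes and fixed entry points as documented in entry-armv.S. */
typedef void (__kernel_dmb_t)(void);
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);

#define __kernel_dmb     (*(__kernel_dmb_t *)0xffff0fa0)
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* Atomically add val to *ptr; __kernel_cmpxchg returns 0 on success. */
static void atomic_add(volatile int *ptr, int val)
{
	int old;

	do {
		old = *ptr;
	} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
}

/* Publish previously written data, then release a lock word another CPU polls. */
static void unlock_release(volatile int *lock)
{
	__kernel_dmb();	/* the kernel routine issues dmb only on SMP ARMv6 */
	*lock = 0;
}

int main(void)
{
	volatile int counter = 0;
	volatile int lock = 1;

	atomic_add(&counter, 5);
	atomic_add(&counter, 7);
	unlock_release(&lock);
	printf("counter = %d, lock = %d\n", counter, lock);	/* 12, 0 */
	return 0;
}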