author    Nicolas Pitre <nico@org.rmk.(none)>           2005-04-29 22:08:33 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2005-04-29 22:08:33 +0100
commit    2d2669b62984b8d76b05a6a045390a3250317d21 (patch)
tree      822f62adf59f2e6302a16289cc99b0f9b873cfb4 /arch/arm/kernel/entry-armv.S
parent    3a1e501511a1e2c665c566939047794dcf86466b (diff)
[PATCH] ARM: 2651/3: kernel helpers for NPTL support
Patch from Nicolas Pitre

This patch entirely reworks the kernel assistance for NPTL on ARM. In particular this provides an efficient way to retrieve the TLS value and perform atomic operations without any instruction emulation nor special system call. This even allows pre-ARMv6 binaries to be forward compatible with SMP systems without any penalty.

The problematic and performance-critical operations are performed through segments of kernel-provided user code reachable from user space at a fixed address in kernel memory. Those fixed entry points are within the vector page, so we basically get them for free: no extra memory page is required and nothing else may be mapped at that location anyway.

This is different from (but doesn't preclude) a full-blown VDSO implementation; however, a VDSO would prevent some assembly tricks with constants that allow for efficient branching to those code segments. And since those code segments only use a few cycles before returning to user code, the far call of a VDSO would add significant overhead to such minimalistic operations.

The ARM_NR_set_tls syscall also changed number. This is done for two reasons:

1) this patch changes the way the TLS value was previously meant to be retrieved, therefore we ensure that whatever library using the old way gets fixed (they only exist in private trees at the moment since the NPTL work is still progressing).

2) the previous number was allocated in a range causing an undefined instruction trap on kernels not supporting that syscall, and it was determined that allocating it in a range returning -ENOSYS would be much nicer for libraries trying to determine whether the feature is present or not.

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
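To make the fixed-address interface concrete, here is a minimal user-space sketch. It is not part of the patch: the wrapper names (kuser_cmpxchg, kuser_helper_version, atomic_add_user) are illustrative, while the addresses are the entry points documented in the code below. It reads the helper-count word at 0xffff0ffc and builds an atomic add on top of the cmpxchg helper at 0xffff0fc0.

/* Sketch only: a user-space view of the kuser helpers added by this patch.
 * The addresses match the entry points documented in entry-armv.S below;
 * the wrapper names are hypothetical. */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, int *ptr);
#define kuser_cmpxchg          (*(kuser_cmpxchg_t *)0xffff0fc0)
#define kuser_helper_version   (*(unsigned int *)0xffff0ffc)  /* number of helpers provided */

/* Atomically add val to *ptr: reload and retry until the cmpxchg succeeds.
 * A non-zero return from the helper means the value changed under us. */
static int atomic_add_user(int *ptr, int val)
{
        int old, new;

        do {
                old = *ptr;
                new = old + val;
        } while (kuser_cmpxchg(old, new, ptr) != 0);

        return new;
}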
Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--  arch/arm/kernel/entry-armv.S  213
1 file changed, 212 insertions(+), 1 deletion(-)
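Similarly, a hedged sketch of fetching the thread pointer through the __kernel_get_tls entry point at 0xffff0fe0. The __switch_to hunk below keeps this value either in the CP15 c13 register (CONFIG_HAS_TLS_REG) or in the word at 0xffff0ff0, and the helper hides that difference; the wrapper names here are again illustrative.

/* Sketch only: reading the TLS value previously set via the set_tls syscall. */
typedef int (kuser_get_tls_t)(void);
#define kuser_get_tls (*(kuser_get_tls_t *)0xffff0fe0)

static void *get_thread_pointer(void)
{
        /* The same call works on kernels with a hardware TLS register and on
         * pre-ARMv6 kernels that keep the value at 0xffff0ff0. */
        return (void *)(unsigned long)kuser_get_tls();
}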
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2a5c3fe09a9..080df907f24 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -269,6 +269,12 @@ __pabt_svc:
add r5, sp, #S_PC
ldmia r7, {r2 - r4} @ Get USR pc, cpsr
+#if __LINUX_ARM_ARCH__ < 6
+ @ make sure our user space atomic helper is aborted
+ cmp r2, #VIRT_OFFSET
+ bichs r3, r3, #PSR_Z_BIT
+#endif
+
@
@ We are now ready to fill in the remaining blanks on the stack:
@
@@ -499,8 +505,12 @@ ENTRY(__switch_to)
mra r4, r5, acc0
stmia ip, {r4, r5}
#endif
+#ifdef CONFIG_HAS_TLS_REG
+ mcr p15, 0, r3, c13, c0, 3 @ set TLS register
+#else
mov r4, #0xffff0fff
- str r3, [r4, #-3] @ Set TLS ptr
+ str r3, [r4, #-15] @ TLS val at 0xffff0ff0
+#endif
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#ifdef CONFIG_VFP
@ Always disable VFP so we can lazily save/restore the old
@@ -519,6 +529,207 @@ ENTRY(__switch_to)
ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
__INIT
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user space
+ * at a fixed address in kernel memory. This is used to provide user space
+ * with some operations which require kernel help because of unimplemented
+ * native features and/or instructions in many ARM CPUs. The idea is for
+ * this code to be executed directly in user mode for best efficiency, but
+ * it is too intimate with its kernel counterpart to be left to user
+ * libraries. In fact this code might even differ from one CPU to another
+ * depending on the available instruction set and restrictions like on
+ * SMP systems. In other words, the kernel reserves the right to change
+ * this code as needed without warning. Only the entry points and their
+ * results are guaranteed to be stable.
+ *
+ * Each segment is 32-byte aligned and will be moved to the top of the high
+ * vector page. New segments (if ever needed) must be added in front of
+ * existing ones. This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * User space is expected to implement those things inline when optimizing
+ * for a processor that has the necessary native support, but only if the
+ * resulting binaries are already going to be incompatible with earlier ARM
+ * processors due to the use of unsupported instructions other than those
+ * provided here. In other words, don't make binaries unable to run on
+ * earlier processors just for the sake of not using these kernel helpers
+ * if your compiled code is not going to use the new instructions for other
+ * purposes.
+ */
+
+ .align 5
+ .globl __kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Reference prototype:
+ *
+ * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * Input:
+ *
+ * r0 = oldval
+ * r1 = newval
+ * r2 = ptr
+ * lr = return address
+ *
+ * Output:
+ *
+ * r0 = returned value (zero or non-zero)
+ * C flag = set if r0 == 0, clear if r0 != 0
+ *
+ * Clobbered:
+ *
+ * r3, ip, flags
+ *
+ * Definition and user space usage example:
+ *
+ * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
+ * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ * For example, a user space atomic_add implementation could look like this:
+ *
+ * #define atomic_add(ptr, val) \
+ * ({ register unsigned int *__ptr asm("r2") = (ptr); \
+ * register unsigned int __result asm("r1"); \
+ * asm volatile ( \
+ * "1: @ atomic_add\n\t" \
+ * "ldr r0, [r2]\n\t" \
+ * "mov r3, #0xffff0fff\n\t" \
+ * "add lr, pc, #4\n\t" \
+ * "add r1, r0, %2\n\t" \
+ * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ * "bcc 1b" \
+ * : "=&r" (__result) \
+ * : "r" (__ptr), "rIL" (val) \
+ * : "r0","r3","ip","lr","cc","memory" ); \
+ * __result; })
+ */
+
+__kuser_cmpxchg: @ 0xffff0fc0
+
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP /* sanity check */
+#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
+#endif
+
+ /*
+ * Theory of operation:
+ *
+ * We set the Z flag before loading oldval. If ever an exception
+ * occurs we cannot be sure the loaded value will still be the same
+ * when the exception returns; therefore the user exception handler
+ * will clear the Z flag whenever the interrupted user code was
+ * actually from the kernel address space (see the usr_entry macro).
+ *
+ * The writeback on the str is used to detect whether the store
+ * actually happened, guarding against an exception just after the
+ * str instruction which would clear the Z flag although the
+ * exchange was done.
+ */
+ teq ip, ip @ set Z flag
+ ldr ip, [r2] @ load current val
+ add r3, r2, #1 @ prepare store ptr
+ teqeq ip, r0 @ compare with oldval if still allowed
+ streq r1, [r3, #-1]! @ store newval if still allowed
+ subs r0, r2, r3 @ if r2 == r3 the str occurred
+ mov pc, lr
+
+#else
+
+ ldrex r3, [r2]
+ subs r3, r3, r0
+ strexeq r3, r1, [r2]
+ rsbs r0, r3, #0
+ mov pc, lr
+
+#endif
+
+ .align 5
+
+/*
+ * Reference prototype:
+ *
+ * int __kernel_get_tls(void)
+ *
+ * Input:
+ *
+ * lr = return address
+ *
+ * Output:
+ *
+ * r0 = TLS value
+ *
+ * Clobbered:
+ *
+ * the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ * typedef int (__kernel_get_tls_t)(void);
+ * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
+ *
+ * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_get_tls() \
+ * ({ register unsigned int __val asm("r0"); \
+ * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
+ * : "=r" (__val) : : "lr","cc" ); \
+ * __val; })
+ */
+
+__kuser_get_tls: @ 0xffff0fe0
+
+#ifndef CONFIG_HAS_TLS_REG
+
+#ifdef CONFIG_SMP /* sanity check */
+#error "CONFIG_SMP without CONFIG_HAS_TLS_REG is wrong"
+#endif
+
+ ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
+ mov pc, lr
+
+#else
+
+ mrc p15, 0, r0, c13, c0, 3 @ read TLS register
+ mov pc, lr
+
+#endif
+
+ .rep 5
+ .word 0 @ pad up to __kuser_helper_version
+ .endr
+
+/*
+ * Reference declaration:
+ *
+ * extern unsigned int __kernel_helper_version;
+ *
+ * Definition and user space usage example:
+ *
+ * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+ *
+ * User space may read this to determine the current number of helpers
+ * available.
+ */
+
+__kuser_helper_version: @ 0xffff0ffc
+ .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+ .globl __kuser_helper_end
+__kuser_helper_end:
+
+
/*
* Vector stubs.
*