Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |   5
-rw-r--r--  arch/arm/kernel/asm-offsets.c   |  20
-rw-r--r--  arch/arm/kernel/entry-armv.S    | 276
-rw-r--r--  arch/arm/kernel/entry-common.S  |  65
-rw-r--r--  arch/arm/kernel/entry-header.S  | 143
-rw-r--r--  arch/arm/kernel/head.S          |   3
-rw-r--r--  arch/arm/kernel/process.c       |  25
-rw-r--r--  arch/arm/kernel/ptrace.c        |   5
-rw-r--r--  arch/arm/kernel/sys_arm.c       |  14
-rw-r--r--  arch/arm/kernel/traps.c         |  69
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |   3
11 files changed, 404 insertions, 224 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 07a56ff6149..4a2af55e134 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -31,8 +31,3 @@ head-y := head.o
obj-$(CONFIG_DEBUG_LL) += debug.o
extra-y := $(head-y) init_task.o vmlinux.lds
-
-# Spell out some dependencies that aren't automatically figured out
-$(obj)/entry-armv.o: $(obj)/entry-header.S include/asm-arm/constants.h
-$(obj)/entry-common.o: $(obj)/entry-header.S include/asm-arm/constants.h \
- $(obj)/calls.S
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 99d43259ff8..c1ff4d1f1bf 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -64,6 +64,26 @@ int main(void)
DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
DEFINE(TI_IWMMXT_STATE, (offsetof(struct thread_info, fpstate)+4)&~7);
BLANK();
+ DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
+ DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
+ DEFINE(S_R2, offsetof(struct pt_regs, ARM_r2));
+ DEFINE(S_R3, offsetof(struct pt_regs, ARM_r3));
+ DEFINE(S_R4, offsetof(struct pt_regs, ARM_r4));
+ DEFINE(S_R5, offsetof(struct pt_regs, ARM_r5));
+ DEFINE(S_R6, offsetof(struct pt_regs, ARM_r6));
+ DEFINE(S_R7, offsetof(struct pt_regs, ARM_r7));
+ DEFINE(S_R8, offsetof(struct pt_regs, ARM_r8));
+ DEFINE(S_R9, offsetof(struct pt_regs, ARM_r9));
+ DEFINE(S_R10, offsetof(struct pt_regs, ARM_r10));
+ DEFINE(S_FP, offsetof(struct pt_regs, ARM_fp));
+ DEFINE(S_IP, offsetof(struct pt_regs, ARM_ip));
+ DEFINE(S_SP, offsetof(struct pt_regs, ARM_sp));
+ DEFINE(S_LR, offsetof(struct pt_regs, ARM_lr));
+ DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc));
+ DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr));
+ DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
+ DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ BLANK();
#if __LINUX_ARM_ARCH__ >= 6
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
BLANK();
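
The S_* constants added above are not hand-maintained numbers: asm-offsets.c is compiled once at build time and each DEFINE() drops a marker into the generated assembly that Kbuild rewrites into a #define usable from the .S files. A minimal sketch of that mechanism (the DEFINE/BLANK macros here are simplified stand-ins for the real kbuild ones):

    /* Simplified sketch of the asm-offsets mechanism. */
    #include <stddef.h>

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define BLANK() asm volatile("\n->" : : )

    struct pt_regs_example { unsigned long r0, r1, pc; };

    int main(void)
    {
            /* The compiler emits lines like "->S_R1 4 ..." into the
             * generated assembly; a sed rule turns them into
             * "#define S_R1 4" so entry-*.S never hard-codes offsets. */
            DEFINE(S_R1, offsetof(struct pt_regs_example, r1));
            BLANK();
            return 0;
    }
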
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb27c317d94..4eb36155dc9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -14,12 +14,12 @@
* it to save wrong values... Be aware!
*/
#include <linux/config.h>
-#include <linux/init.h>
-#include <asm/thread_info.h>
#include <asm/glue.h>
-#include <asm/ptrace.h>
#include <asm/vfpmacros.h>
+#include <asm/hardware.h> /* should be moved into entry-macro.S */
+#include <asm/arch/irqs.h> /* should be moved into entry-macro.S */
+#include <asm/arch/entry-macro.S>
#include "entry-header.S"
@@ -118,7 +118,7 @@ __dabt_svc:
@
@ IRQs off again before pulling preserved data off the stack
@
- disable_irq r0
+ disable_irq
@
@ restore SPSR and restart the instruction
@@ -198,7 +198,7 @@ __und_svc:
@
@ IRQs off again before pulling preserved data off the stack
@
-1: disable_irq r0
+1: disable_irq
@
@ restore SPSR and restart the instruction
@@ -232,7 +232,7 @@ __pabt_svc:
@
@ IRQs off again before pulling preserved data off the stack
@
- disable_irq r0
+ disable_irq
@
@ restore SPSR and restart the instruction
@@ -269,6 +269,12 @@ __pabt_svc:
add r5, sp, #S_PC
ldmia r7, {r2 - r4} @ Get USR pc, cpsr
+#if __LINUX_ARM_ARCH__ < 6
+ @ make sure our user space atomic helper is aborted
+ cmp r2, #VIRT_OFFSET
+ bichs r3, r3, #PSR_Z_BIT
+#endif
+
@
@ We are now ready to fill in the remaining blanks on the stack:
@
@@ -316,7 +322,7 @@ __dabt_usr:
@
@ IRQs on, then call the main handler
@
- enable_irq r2
+ enable_irq
mov r2, sp
adr lr, ret_from_exception
b do_DataAbort
@@ -418,7 +424,7 @@ call_fpe:
movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable
#endif
- enable_irq r7
+ enable_irq
add pc, pc, r8, lsr #6
mov r0, r0
@@ -472,7 +478,7 @@ fpundefinstr:
__pabt_usr:
usr_entry abt
- enable_irq r0 @ Enable interrupts
+ enable_irq @ Enable interrupts
mov r0, r2 @ address (pc)
mov r1, sp @ regs
bl do_PrefetchAbort @ call abort handler
@@ -499,8 +505,12 @@ ENTRY(__switch_to)
mra r4, r5, acc0
stmia ip, {r4, r5}
#endif
+#if defined(CONFIG_HAS_TLS_REG)
+ mcr p15, 0, r3, c13, c0, 3 @ set TLS register
+#elif !defined(CONFIG_TLS_REG_EMUL)
mov r4, #0xffff0fff
- str r3, [r4, #-3] @ Set TLS ptr
+ str r3, [r4, #-15] @ TLS val at 0xffff0ff0
+#endif
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#ifdef CONFIG_VFP
@ Always disable VFP so we can lazily save/restore the old
@@ -519,11 +529,209 @@ ENTRY(__switch_to)
ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
__INIT
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user space
+ * at a fixed address in kernel memory. This is used to provide user space
+ * with some operations which require kernel help because of unimplemented
+ * native features and/or instructions in many ARM CPUs. The idea is for
+ * this code to be executed directly in user mode for best efficiency, but
+ * it is too intimate with its kernel counterpart to be left to user
+ * libraries. In fact this code might even differ from one CPU to another
+ * depending on the available instruction set, or on restrictions such as
+ * those imposed by SMP systems. In other words, the kernel reserves the
+ * right to change this code as needed without warning. Only the entry
+ * points and their results are guaranteed to be stable.
+ *
+ * Each segment is 32-byte aligned and will be moved to the top of the high
+ * vector page. New segments (if ever needed) must be added in front of
+ * existing ones. This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * User space is expected to implement these operations inline when
+ * optimizing for a processor that has the necessary native support, but
+ * only if the resulting binaries are already going to be incompatible with
+ * earlier ARM processors due to the use of unsupported instructions other
+ * than what is provided here. In other words, don't make binaries unable
+ * to run on earlier processors just for the sake of avoiding these kernel
+ * helpers if your compiled code is not going to use the new instructions
+ * for any other purpose.
+ */
+
+ .align 5
+ .globl __kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Reference prototype:
+ *
+ * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * Input:
+ *
+ * r0 = oldval
+ * r1 = newval
+ * r2 = ptr
+ * lr = return address
+ *
+ * Output:
+ *
+ * r0 = returned value (zero or non-zero)
+ * C flag = set if r0 == 0, clear if r0 != 0
+ *
+ * Clobbered:
+ *
+ * r3, ip, flags
+ *
+ * Definition and user space usage example:
+ *
+ * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
+ * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ * For example, a user space atomic_add implementation could look like this:
+ *
+ * #define atomic_add(ptr, val) \
+ * ({ register unsigned int *__ptr asm("r2") = (ptr); \
+ * register unsigned int __result asm("r1"); \
+ * asm volatile ( \
+ * "1: @ atomic_add\n\t" \
+ * "ldr r0, [r2]\n\t" \
+ * "mov r3, #0xffff0fff\n\t" \
+ * "add lr, pc, #4\n\t" \
+ * "add r1, r0, %2\n\t" \
+ * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ * "bcc 1b" \
+ * : "=&r" (__result) \
+ * : "r" (__ptr), "rIL" (val) \
+ * : "r0","r3","ip","lr","cc","memory" ); \
+ * __result; })
+ */
+
+__kuser_cmpxchg: @ 0xffff0fc0
+
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP /* sanity check */
+#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
+#endif
+
+ /*
+ * Theory of operation:
+ *
+ * We set the Z flag before loading oldval. If ever an exception
+ * occurs we can not be sure the loaded value will still be the same
+ * when the exception returns, therefore the user exception handler
+ * will clear the Z flag whenever the interrupted user code was
+ * actually from the kernel address space (see the usr_entry macro).
+ *
+ * The write-back on the str is used to prevent a race with an
+ * exception happening just after the str instruction, which would
+ * clear the Z flag even though the exchange was done.
+ */
+ teq ip, ip @ set Z flag
+ ldr ip, [r2] @ load current val
+ add r3, r2, #1 @ prepare store ptr
+ teqeq ip, r0 @ compare with oldval if still allowed
+ streq r1, [r3, #-1]! @ store newval if still allowed
+ subs r0, r2, r3 @ if r2 == r3 the str occurred
+ mov pc, lr
+
+#else
+
+ ldrex r3, [r2]
+ subs r3, r3, r0
+ strexeq r3, r1, [r2]
+ rsbs r0, r3, #0
+ mov pc, lr
+
+#endif
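
Beyond the atomic_add example in the comment block above, a plain C caller can reach the helper through the documented function pointer. A minimal user-space sketch, assuming the 0xffff0fc0 entry point and the return convention described above (zero on success, non-zero if the exchange did not happen):

    /* Hedged user-space sketch: retry loop around the kernel cmpxchg helper. */
    typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
    #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

    static int atomic_add_return_sketch(int *ptr, int val)
    {
            int old, new;

            do {
                    old = *ptr;
                    new = old + val;
                    /* non-zero return: someone else changed *ptr (or an
                     * exception aborted the attempt); reload and retry */
            } while (__kernel_cmpxchg(old, new, ptr) != 0);

            return new;
    }
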
+
+ .align 5
+
+/*
+ * Reference prototype:
+ *
+ * int __kernel_get_tls(void)
+ *
+ * Input:
+ *
+ * lr = return address
+ *
+ * Output:
+ *
+ * r0 = TLS value
+ *
+ * Clobbered:
+ *
+ * the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ * typedef int (__kernel_get_tls_t)(void);
+ * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
+ *
+ * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_get_tls() \
+ * ({ register unsigned int __val asm("r0"); \
+ * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
+ * : "=r" (__val) : : "lr","cc" ); \
+ * __val; })
+ */
+
+__kuser_get_tls: @ 0xffff0fe0
+
+#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
+
+ ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
+ mov pc, lr
+
+#else
+
+ mrc p15, 0, r0, c13, c0, 3 @ read TLS register
+ mov pc, lr
+
+#endif
+
+ .rep 5
+ .word 0 @ pad up to __kuser_helper_version
+ .endr
+
+/*
+ * Reference declaration:
+ *
+ * extern unsigned int __kernel_helper_version;
+ *
+ * Definition and user space usage example:
+ *
+ * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+ *
+ * User space may read this to determine the current number of helpers
+ * available.
+ */
+
+__kuser_helper_version: @ 0xffff0ffc
+ .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+ .globl __kuser_helper_end
+__kuser_helper_end:
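
User space can gate its use of these helpers on the version word at 0xffff0ffc. A minimal sketch; the mapping of version numbers to individual helpers (1 => get_tls, 2 => cmpxchg) is inferred from the 32-byte layout above and should be treated as an assumption:

    /* Hedged sketch: check helper availability before using it. */
    #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)

    static int have_kernel_get_tls(void)  { return __kernel_helper_version >= 1; }
    static int have_kernel_cmpxchg(void)  { return __kernel_helper_version >= 2; }
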
+
+
/*
* Vector stubs.
*
- * This code is copied to 0x200 or 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.
+ * This code is copied to 0xffff0200 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not
+ * exceed 0x300 bytes.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -544,7 +752,7 @@ vector_\name:
@
mrs r13, cpsr
bic r13, r13, #MODE_MASK
- orr r13, r13, #MODE_SVC
+ orr r13, r13, #SVC_MODE
msr spsr_cxsf, r13 @ switch to SVC_32 mode
and lr, lr, #15
@@ -552,6 +760,7 @@ vector_\name:
movs pc, lr @ Changes mode and branches
.endm
+ .globl __stubs_start
__stubs_start:
/*
* Interrupt dispatcher
@@ -686,37 +895,24 @@ vector_addrexcptn:
.LCsabt:
.word __temp_abt
+ .globl __stubs_end
__stubs_end:
- .equ __real_stubs_start, .LCvectors + 0x200
+ .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
-.LCvectors:
+ .globl __vectors_start
+__vectors_start:
swi SYS_ERROR0
- b __real_stubs_start + (vector_und - __stubs_start)
- ldr pc, __real_stubs_start + (.LCvswi - __stubs_start)
- b __real_stubs_start + (vector_pabt - __stubs_start)
- b __real_stubs_start + (vector_dabt - __stubs_start)
- b __real_stubs_start + (vector_addrexcptn - __stubs_start)
- b __real_stubs_start + (vector_irq - __stubs_start)
- b __real_stubs_start + (vector_fiq - __stubs_start)
-
-ENTRY(__trap_init)
- stmfd sp!, {r4 - r6, lr}
-
- mov r0, #0xff000000
- orr r0, r0, #0x00ff0000 @ high vectors position
- adr r1, .LCvectors @ set up the vectors
- ldmia r1, {r1, r2, r3, r4, r5, r6, ip, lr}
- stmia r0, {r1, r2, r3, r4, r5, r6, ip, lr}
-
- add r2, r0, #0x200
- adr r0, __stubs_start @ copy stubs to 0x200
- adr r1, __stubs_end
-1: ldr r3, [r0], #4
- str r3, [r2], #4
- cmp r0, r1
- blt 1b
- LOADREGS(fd, sp!, {r4 - r6, pc})
+ b vector_und + stubs_offset
+ ldr pc, .LCvswi + stubs_offset
+ b vector_pabt + stubs_offset
+ b vector_dabt + stubs_offset
+ b vector_addrexcptn + stubs_offset
+ b vector_irq + stubs_offset
+ b vector_fiq + stubs_offset
+
+ .globl __vectors_end
+__vectors_end:
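
A note on the stubs_offset trick above: the vectors and the stubs are linked at unrelated addresses but copied to fixed, adjacent locations (0xffff0000 and 0xffff0200), and a PC-relative branch only encodes a delta, so biasing the assembled target by stubs_offset makes the copied branch land on the copied stub. A small arithmetic sketch with made-up link addresses (the real ones depend on the kernel layout):

    /* Illustrative link-time addresses -- assumptions, not the real ones. */
    #define VECTORS_START 0xc0008000UL
    #define STUBS_START   0xc0008800UL
    #define VECTOR_UND    0xc0008840UL   /* one stub inside __stubs_start..end */

    static unsigned long copied_branch_target(void)
    {
            unsigned long stubs_offset = VECTORS_START + 0x200 - STUBS_START;
            /* "b vector_und + stubs_offset" placed at __vectors_start + 4
             * encodes this pc-relative delta: */
            unsigned long delta = (VECTOR_UND + stubs_offset) - (VECTORS_START + 4);

            /* after the copy the branch executes at 0xffff0004 and lands at
             * 0xffff0200 + (vector_und - __stubs_start): the copied stub. */
            return 0xffff0004UL + delta;
    }
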
.data
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 53a7e0dea44..3f8d0e3aefa 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -9,19 +9,10 @@
*/
#include <linux/config.h>
-#include <asm/thread_info.h>
-#include <asm/ptrace.h>
#include <asm/unistd.h>
#include "entry-header.S"
-/*
- * We rely on the fact that R0 is at the bottom of the stack (due to
- * slow/fast restore user regs).
- */
-#if S_R0 != 0
-#error "Please fix"
-#endif
.align 5
/*
@@ -30,11 +21,19 @@
* stack.
*/
ret_fast_syscall:
- disable_irq r1 @ disable interrupts
+ disable_irq @ disable interrupts
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
- fast_restore_user_regs
+
+ @ fast_restore_user_regs
+ ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
+ ldr lr, [sp, #S_OFF + S_PC]! @ get pc
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
+ mov r0, r0
+ add sp, sp, #S_FRAME_SIZE - S_PC
+ movs pc, lr @ return & move spsr_svc into cpsr
/*
* Ok, we need to do extra processing, enter the slow path.
@@ -49,7 +48,7 @@ work_pending:
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
bl do_notify_resume
- disable_irq r1 @ disable interrupts
+ disable_irq @ disable interrupts
b no_work_pending
work_resched:
@@ -59,12 +58,19 @@ work_resched:
*/
ENTRY(ret_to_user)
ret_slow_syscall:
- disable_irq r1 @ disable interrupts
+ disable_irq @ disable interrupts
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne work_pending
no_work_pending:
- slow_restore_user_regs
+ @ slow_restore_user_regs
+ ldr r1, [sp, #S_PSR] @ get calling cpsr
+ ldr lr, [sp, #S_PC]! @ get pc
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
+ mov r0, r0
+ add sp, sp, #S_FRAME_SIZE - S_PC
+ movs pc, lr @ return & move spsr_svc into cpsr
/*
* This is how we return from a fork.
@@ -116,9 +122,26 @@ ENTRY(ret_from_fork)
.align 5
ENTRY(vector_swi)
- save_user_regs
+ sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ Calling r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^ @ Calling sp, lr
+ mrs r8, spsr @ called from non-FIQ mode, so ok.
+ str lr, [sp, #S_PC] @ Save calling PC
+ str r8, [sp, #S_PSR] @ Save CPSR
+ str r0, [sp, #S_OLD_R0] @ Save OLD_R0
zero_fp
- get_scno
+
+ /*
+ * Get the system call number.
+ */
+#ifdef CONFIG_ARM_THUMB
+ tst r8, #PSR_T_BIT @ this is the SPSR saved above
+ addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
+ ldreq scno, [lr, #-4]
+#else
+ ldr scno, [lr, #-4] @ get SWI instruction
+#endif
arm710_bug_check scno, ip
#ifdef CONFIG_ALIGNMENT_TRAP
@@ -126,14 +149,14 @@ ENTRY(vector_swi)
ldr ip, [ip]
mcr p15, 0, ip, c1, c0 @ update control register
#endif
- enable_irq ip
+ enable_irq
str r4, [sp, #-S_OFF]! @ push fifth arg
get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
bic scno, scno, #0xff000000 @ mask off SWI op-code
- eor scno, scno, #OS_NUMBER << 20 @ check OS number
+ eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
adr tbl, sys_call_table @ load syscall table pointer
tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
bne __sys_trace
@@ -144,8 +167,8 @@ ENTRY(vector_swi)
add r1, sp, #S_OFF
2: mov why, #0 @ no longer a real syscall
- cmp scno, #ARMSWI_OFFSET
- eor r0, scno, #OS_NUMBER << 20 @ put OS number back
+ cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+ eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
b sys_ni_syscall @ not private func
@@ -190,7 +213,7 @@ ENTRY(sys_call_table)
@ r5 = syscall table
.type sys_syscall, #function
sys_syscall:
- eor scno, r0, #OS_NUMBER << 20
+ eor scno, r0, #__NR_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
stmloia sp, {r5, r6} @ shuffle args
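
With OS_NUMBER and ARMSWI_OFFSET gone, the SWI decode above works directly in terms of __NR_SYSCALL_BASE. A hedged C sketch of the classification logic; the OABI values assumed here (__NR_SYSCALL_BASE == 0x900000, __ARM_NR_BASE == 0x9f0000) are not spelled out in this diff:

    #define NR_SYSCALL_BASE  0x900000u   /* __NR_SYSCALL_BASE, OABI assumption */
    #define ARM_NR_BASE      0x9f0000u   /* __ARM_NR_BASE, OABI assumption */

    static const char *classify_swi(unsigned int swi_insn, unsigned int nr_syscalls)
    {
            /* bic scno, scno, #0xff000000 ; eor scno, scno, #__NR_SYSCALL_BASE */
            unsigned int scno = (swi_insn & ~0xff000000u) ^ NR_SYSCALL_BASE;

            if (scno < nr_syscalls)
                    return "sys_call_table[scno]";
            if (scno >= ARM_NR_BASE - NR_SYSCALL_BASE)
                    return "arm_syscall";     /* private __ARM_NR_* call, OS
                                               * number put back via eor */
            return "sys_ni_syscall";
    }
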
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4039d8c120b..a3d40a0e2b0 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -1,24 +1,11 @@
-#include <linux/config.h> /* for CONFIG_ARCH_xxxx */
+#include <linux/config.h>
+#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/errno.h>
-#include <asm/hardware.h>
-#include <asm/arch/irqs.h>
-#include <asm/arch/entry-macro.S>
-
-#ifndef MODE_SVC
-#define MODE_SVC 0x13
-#endif
-
- .macro zero_fp
-#ifdef CONFIG_FRAME_POINTER
- mov fp, #0
-#endif
- .endm
-
- .text
+#include <asm/thread_info.h>
@ Bad Abort numbers
@ -----------------
@@ -29,113 +16,44 @@
#define BAD_IRQ 3
#define BAD_UNDEFINSTR 4
-#define PT_TRACESYS 0x00000002
-
-@ OS version number used in SWIs
-@ RISC OS is 0
-@ RISC iX is 8
@
-#define OS_NUMBER 9
-#define ARMSWI_OFFSET 0x000f0000
-
+@ Most of the stack format comes from struct pt_regs, but with
+@ the addition of 8 bytes for storing syscall args 5 and 6.
@
-@ Stack format (ensured by USER_* and SVC_*)
-@
-#define S_FRAME_SIZE 72
-#define S_OLD_R0 68
-#define S_PSR 64
-
-#define S_PC 60
-#define S_LR 56
-#define S_SP 52
-#define S_IP 48
-#define S_FP 44
-#define S_R10 40
-#define S_R9 36
-#define S_R8 32
-#define S_R7 28
-#define S_R6 24
-#define S_R5 20
-#define S_R4 16
-#define S_R3 12
-#define S_R2 8
-#define S_R1 4
-#define S_R0 0
#define S_OFF 8
- .macro set_cpsr_c, reg, mode
- msr cpsr_c, \mode
+/*
+ * The SWI code relies on the fact that R0 is at the bottom of the stack
+ * (due to slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+ .macro zero_fp
+#ifdef CONFIG_FRAME_POINTER
+ mov fp, #0
+#endif
.endm
#if __LINUX_ARM_ARCH__ >= 6
- .macro disable_irq, temp
+ .macro disable_irq
cpsid i
.endm
- .macro enable_irq, temp
+ .macro enable_irq
cpsie i
.endm
#else
- .macro disable_irq, temp
- set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
+ .macro disable_irq
+ msr cpsr_c, #PSR_I_BIT | SVC_MODE
.endm
- .macro enable_irq, temp
- set_cpsr_c \temp, #MODE_SVC
+ .macro enable_irq
+ msr cpsr_c, #SVC_MODE
.endm
#endif
- .macro save_user_regs
- sub sp, sp, #S_FRAME_SIZE
- stmia sp, {r0 - r12} @ Calling r0 - r12
- add r8, sp, #S_PC
- stmdb r8, {sp, lr}^ @ Calling sp, lr
- mrs r8, spsr @ called from non-FIQ mode, so ok.
- str lr, [sp, #S_PC] @ Save calling PC
- str r8, [sp, #S_PSR] @ Save CPSR
- str r0, [sp, #S_OLD_R0] @ Save OLD_R0
- .endm
-
- .macro restore_user_regs
- ldr r1, [sp, #S_PSR] @ Get calling cpsr
- disable_irq ip @ disable IRQs
- ldr lr, [sp, #S_PC]! @ Get PC
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r0 - lr}^ @ Get calling r0 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-
-/*
- * Must be called with IRQs already disabled.
- */
- .macro fast_restore_user_regs
- ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_OFF + S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-
-/*
- * Must be called with IRQs already disabled.
- */
- .macro slow_restore_user_regs
- ldr r1, [sp, #S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r0 - lr}^ @ get calling r1 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-
- .macro mask_pc, rd, rm
- .endm
-
.macro get_thread_info, rd
mov \rd, sp, lsr #13
mov \rd, \rd, lsl #13
@@ -165,18 +83,3 @@ scno .req r7 @ syscall number
tbl .req r8 @ syscall table pointer
why .req r8 @ Linux syscall (!= 0)
tsk .req r9 @ current thread_info
-
-/*
- * Get the system call number.
- */
- .macro get_scno
-#ifdef CONFIG_ARM_THUMB
- tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
- addne scno, r7, #OS_NUMBER << 20 @ put OS number in
- ldreq scno, [lr, #-4]
-
-#else
- mask_pc lr, lr
- ldr scno, [lr, #-4] @ get SWI instruction
-#endif
- .endm
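
The get_thread_info macro kept above masks the kernel stack pointer down to an 8 KB boundary, which is the assembly equivalent of the usual current_thread_info() idiom. A sketch in C terms (THREAD_SIZE value assumed from its use elsewhere in this patch):

    #define THREAD_SIZE 8192   /* assumption, consistent with this patch */

    struct thread_info;        /* opaque here */

    /* thread_info sits at the bottom of the kernel stack, so masking the
     * stack pointer recovers it -- same as lsr #13 / lsl #13 above. */
    static inline struct thread_info *current_thread_info_sketch(unsigned long sp)
    {
            return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }
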
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 171b3e811c7..4733877296d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -19,6 +19,7 @@
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/constants.h>
+#include <asm/thread_info.h>
#include <asm/system.h>
#define PROCINFO_MMUFLAGS 8
@@ -131,7 +132,7 @@ __switch_data:
.long processor_id @ r4
.long __machine_arch_type @ r5
.long cr_alignment @ r6
- .long init_thread_union+8192 @ sp
+ .long init_thread_union + THREAD_START_SP @ sp
/*
* The following fragment of code is executed with the MMU on, and uses
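
The switch from a literal 8192 to THREAD_START_SP ties the initial stack pointer to the same constants used later in copy_thread() and execve(). A sketch of the assumed relationship between these macros (their exact definitions live in asm/thread_info.h and are an assumption here):

    /* Assumed definitions, consistent with how this patch uses them. */
    #define PAGE_SIZE          4096
    #define THREAD_SIZE_ORDER  1
    #define THREAD_SIZE        (PAGE_SIZE << THREAD_SIZE_ORDER)   /* 8192 */
    #define THREAD_START_SP    (THREAD_SIZE - 8)

    struct pt_regs_stub { unsigned long uregs[18]; };

    /* the child's pt_regs sits just below the initial stack pointer,
     * matching copy_thread() further down in this patch */
    #define task_pt_regs_sketch(thread_base) \
            ((struct pt_regs_stub *)((thread_base) + THREAD_START_SP) - 1)
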
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 26eacd3e5de..8f146a4b475 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -256,8 +256,6 @@ static unsigned long *thread_info_head;
static unsigned int nr_thread_info;
#define EXTRA_TASK_STRUCT 4
-#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
-#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
struct thread_info *alloc_thread_info(struct task_struct *task)
{
@@ -274,17 +272,16 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
}
if (!thread)
- thread = ll_alloc_task_struct();
+ thread = (struct thread_info *)
+ __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-#ifdef CONFIG_MAGIC_SYSRQ
+#ifdef CONFIG_DEBUG_STACK_USAGE
/*
* The stack must be cleared if you want SYSRQ-T to
* give sensible stack usage information
*/
- if (thread) {
- char *p = (char *)thread;
- memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
- }
+ if (thread)
+ memzero(thread, THREAD_SIZE);
#endif
return thread;
}
@@ -297,7 +294,7 @@ void free_thread_info(struct thread_info *thread)
thread_info_head = p;
nr_thread_info += 1;
} else
- ll_free_task_struct(thread);
+ free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
}
/*
@@ -350,7 +347,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
struct thread_info *thread = p->thread_info;
struct pt_regs *childregs;
- childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_SIZE - 8)) - 1;
+ childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_START_SP)) - 1;
*childregs = *regs;
childregs->ARM_r0 = 0;
childregs->ARM_sp = stack_start;
@@ -447,15 +444,17 @@ EXPORT_SYMBOL(kernel_thread);
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, lr;
- unsigned long stack_page;
+ unsigned long stack_start, stack_end;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_page = 4096 + (unsigned long)p->thread_info;
+ stack_start = (unsigned long)(p->thread_info + 1);
+ stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE;
+
fp = thread_saved_fp(p);
do {
- if (fp < stack_page || fp > 4092+stack_page)
+ if (fp < stack_start || fp > stack_end)
return 0;
lr = pc_pointer (((unsigned long *)fp)[-1]);
if (!in_sched_functions(lr))
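
get_wchan() now derives its frame-pointer bounds from thread_info rather than a hard-coded 4096. A sketch of the resulting validity check (simplified types, THREAD_SIZE assumed as above):

    #define THREAD_SIZE 8192   /* assumption, consistent with this patch */

    /* the saved frame pointer must lie within the task's kernel stack,
     * which spans from just past thread_info up to thread_info + THREAD_SIZE */
    static int fp_in_stack(unsigned long fp, unsigned long ti_addr,
                           unsigned long ti_size)
    {
            unsigned long stack_start = ti_addr + ti_size;   /* thread_info + 1 */
            unsigned long stack_end   = ti_addr + THREAD_SIZE;

            return fp >= stack_start && fp <= stack_end;
    }
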
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index efd7a341614..cd99b83f14c 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -19,6 +19,7 @@
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
+#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -693,7 +694,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
case PTRACE_SYSCALL:
case PTRACE_CONT:
ret = -EIO;
- if ((unsigned long) data > _NSIG)
+ if (!valid_signal(data))
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -728,7 +729,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
*/
case PTRACE_SINGLESTEP:
ret = -EIO;
- if ((unsigned long) data > _NSIG)
+ if (!valid_signal(data))
break;
child->ptrace |= PT_SINGLESTEP;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
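
valid_signal() from <linux/signal.h> replaces the open-coded _NSIG comparison. To a first approximation it performs the same bound check; the sketch below is an assumption about its definition, not a copy of it:

    /* Approximate sketch of valid_signal() -- treat as an assumption. */
    #define _NSIG 64

    static inline int valid_signal_sketch(unsigned long sig)
    {
            return sig <= _NSIG ? 1 : 0;
    }
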
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 7ba6342cf93..f897ce2ccf0 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -227,18 +227,6 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third,
}
}
-asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg,
- unsigned long __user *addr)
-{
- unsigned long ret;
- long err;
-
- err = do_shmat(shmid, shmaddr, shmflg, &ret);
- if (err == 0)
- err = put_user(ret, addr);
- return err;
-}
-
/* Fork a new task - this creates a new program thread.
* This is called indirectly via a small wrapper
*/
@@ -314,7 +302,7 @@ long execve(const char *filename, char **argv, char **envp)
"b ret_to_user"
:
: "r" (current_thread_info()),
- "Ir" (THREAD_SIZE - 8 - sizeof(regs)),
+ "Ir" (THREAD_START_SP - sizeof(regs)),
"r" (&regs),
"Ir" (sizeof(regs))
: "r0", "r1", "r2", "r3", "ip", "memory");
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6e31718f600..14df16b983f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -218,7 +218,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
tsk->comm, tsk->pid, tsk->thread_info + 1);
if (!user_mode(regs) || in_interrupt()) {
- dump_mem("Stack: ", regs->ARM_sp, 8192+(unsigned long)tsk->thread_info);
+ dump_mem("Stack: ", regs->ARM_sp,
+ THREAD_SIZE + (unsigned long)tsk->thread_info);
dump_backtrace(regs, tsk);
dump_instr(regs);
}
@@ -450,13 +451,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
case NR(set_tls):
thread->tp_value = regs->ARM_r0;
+#if defined(CONFIG_HAS_TLS_REG)
+ asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
+#elif !defined(CONFIG_TLS_REG_EMUL)
/*
- * Our user accessible TLS ptr is located at 0xffff0ffc.
- * On SMP read access to this address must raise a fault
- * and be emulated from the data abort handler.
- * m
+ * User space must never try to access this directly.
+ * Expect your app to break eventually if you do so.
+ * The user helper at 0xffff0fe0 must be used instead.
+ * (see entry-armv.S for details)
*/
- *((unsigned long *)0xffff0ffc) = thread->tp_value;
+ *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+#endif
return 0;
default:
@@ -493,6 +498,44 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
return 0;
}
+#ifdef CONFIG_TLS_REG_EMUL
+
+/*
+ * We might be running on an ARMv6+ processor which should have the TLS
+ * register but for some reason we can't use it, or maybe an SMP system
+ * using a pre-ARMv6 processor (there are apparently a few prototypes like
+ * that in existence) and therefore access to that register must be
+ * emulated.
+ */
+
+static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
+{
+ int reg = (instr >> 12) & 15;
+ if (reg == 15)
+ return 1;
+ regs->uregs[reg] = current_thread_info()->tp_value;
+ regs->ARM_pc += 4;
+ return 0;
+}
+
+static struct undef_hook arm_mrc_hook = {
+ .instr_mask = 0x0fff0fff,
+ .instr_val = 0x0e1d0f70,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = 0,
+ .fn = get_tp_trap,
+};
+
+static int __init arm_mrc_hook_init(void)
+{
+ register_undef_hook(&arm_mrc_hook);
+ return 0;
+}
+
+late_initcall(arm_mrc_hook_init);
+
+#endif
+
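
The undef hook above matches a user-space read of the TLS register ("mrc p15, 0, rN, c13, c0, 3") and emulates it. The mask/value pair leaves only the condition field and the destination register variable; a small decode sketch using the same constants:

    /* Sketch: how instr_mask/instr_val match a TLS-register read.
     * "mrc p15, 0, rN, c13, c0, 3" encodes as 0x0e1d0f70 | (N << 12) with
     * the condition code in bits 31..28; masking with 0x0fff0fff ignores
     * the condition and the Rd field. */
    static int is_tls_mrc(unsigned int instr, int *rd)
    {
            if ((instr & 0x0fff0fff) != 0x0e1d0f70)
                    return 0;
            *rd = (instr >> 12) & 15;   /* destination register; the hook
                                         * above refuses to emulate r15 */
            return 1;
    }
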
void __bad_xchg(volatile void *ptr, int size)
{
printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
@@ -578,9 +621,19 @@ EXPORT_SYMBOL(abort);
void __init trap_init(void)
{
- extern void __trap_init(void);
+ extern char __stubs_start[], __stubs_end[];
+ extern char __vectors_start[], __vectors_end[];
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
- __trap_init();
+ /*
+ * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+ * into the vector page, mapped at 0xffff0000, and ensure these
+ * are visible to the instruction stream.
+ */
+ memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
+ memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
+ memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
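
trap_init() now lays out the entire high vector page itself: vectors at 0xffff0000, stubs at 0xffff0200, and the kuser helpers packed against the top so that __kuser_helper_version lands at 0xffff0ffc. A sketch of the resulting layout, assuming the usual 4 KB vector page:

    /* Hedged sketch of the high vector page assembled by trap_init():
     *
     *   0xffff0000            exception vectors (__vectors_start..end)
     *   0xffff0200            vector stubs (__stubs_start..end, < 0x300 bytes)
     *   0xffff1000 - kuser_sz kuser helpers, giving:
     *   0xffff0fc0            __kernel_cmpxchg
     *   0xffff0fe0            __kernel_get_tls
     *   0xffff0ff0            TLS value word (when no TLS register is used)
     *   0xffff0ffc            __kernel_helper_version
     */
    #define VECTORS_BASE   0xffff0000UL
    #define VECTORS_STUBS  (VECTORS_BASE + 0x200)
    #define VECTORS_TOP    (VECTORS_BASE + 0x1000)
    #define KUSER_DEST(kuser_sz)  (VECTORS_TOP - (kuser_sz))
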
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a39c6a42d68..ad2d66c93a5 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -5,6 +5,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <linux/config.h>
+#include <asm/thread_info.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -103,7 +104,7 @@ SECTIONS
__data_loc = ALIGN(4); /* location in binary */
. = DATAADDR;
#else
- . = ALIGN(8192);
+ . = ALIGN(THREAD_SIZE);
__data_loc = .;
#endif