Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |   3
-rw-r--r--  arch/arm/kernel/armksyms.c      |  13
-rw-r--r--  arch/arm/kernel/asm-offsets.c   |   3
-rw-r--r--  arch/arm/kernel/bios32.c        |   7
-rw-r--r--  arch/arm/kernel/crunch-bits.S   | 305
-rw-r--r--  arch/arm/kernel/crunch.c        |  83
-rw-r--r--  arch/arm/kernel/entry-armv.S    |  30
-rw-r--r--  arch/arm/kernel/entry-common.S  |   4
-rw-r--r--  arch/arm/kernel/head-nommu.S    |   2
-rw-r--r--  arch/arm/kernel/head.S          |   4
-rw-r--r--  arch/arm/kernel/irq.c           |   8
-rw-r--r--  arch/arm/kernel/iwmmxt.S        |   4
-rw-r--r--  arch/arm/kernel/process.c       |  85
-rw-r--r--  arch/arm/kernel/ptrace.c        |  36
-rw-r--r--  arch/arm/kernel/setup.c         |  44
-rw-r--r--  arch/arm/kernel/signal.c        | 265
-rw-r--r--  arch/arm/kernel/time.c          |  24
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |   8

18 files changed, 715 insertions(+), 213 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a601b8b55f3..7cffbaef064 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -22,6 +22,9 @@ obj-$(CONFIG_PCI) += bios32.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
+obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
+AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
+
obj-$(CONFIG_IWMMXT) += iwmmxt.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index c49b5d4d7fc..da69e660574 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
/* user mem (segment) */
-EXPORT_SYMBOL(__arch_copy_from_user);
-EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__arch_clear_user);
-EXPORT_SYMBOL(__arch_strnlen_user);
-EXPORT_SYMBOL(__arch_strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
@@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
+#endif
/* crypto hash */
EXPORT_SYMBOL(sha_transform);
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 396efba9bac..447ede5143a 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -60,6 +60,9 @@ int main(void)
#ifdef CONFIG_IWMMXT
DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt));
#endif
+#ifdef CONFIG_CRUNCH
+ DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate));
+#endif
BLANK();
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index de606dfa8db..45da06fc1ba 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -304,7 +304,7 @@ static inline int pdev_bad_for_parity(struct pci_dev *dev)
static void __devinit
pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev)
{
- unsigned long offset;
+ resource_size_t offset;
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
@@ -634,9 +634,9 @@ char * __init pcibios_setup(char *str)
* which might be mirrored at 0x0100-0x03ff..
*/
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO && start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
@@ -702,7 +702,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
/*
* Mark this as IO
*/
- vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vma->vm_start, phys,
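The pcibios_align_resource() hunk above only widens the types to resource_size_t while keeping the long-standing ISA/VGA aliasing rule: an I/O resource that starts anywhere in the 0x100-0x3ff alias window is rounded up to the next 1KB boundary. A standalone illustration of that rounding (not kernel code):

```c
#include <stdio.h>

/* The rounding rule from pcibios_align_resource() above: an I/O
 * start address inside the 0x100-0x3ff alias window is bumped to
 * the next 1KB boundary. Standalone illustration only. */
int main(void)
{
	unsigned long start = 0x358;	/* example start address */

	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;

	printf("aligned start = %#lx\n", start);	/* prints 0x400 */
	return 0;
}
```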
diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S
new file mode 100644
index 00000000000..a26886758c6
--- /dev/null
+++ b/arch/arm/kernel/crunch-bits.S
@@ -0,0 +1,305 @@
+/*
+ * arch/arm/kernel/crunch-bits.S
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
+ * Copyright (c) 2003-2004, MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/arch/ep93xx-regs.h>
+
+/*
+ * We can't use hex constants here due to a bug in gas.
+ */
+#define CRUNCH_MVDX0 0
+#define CRUNCH_MVDX1 8
+#define CRUNCH_MVDX2 16
+#define CRUNCH_MVDX3 24
+#define CRUNCH_MVDX4 32
+#define CRUNCH_MVDX5 40
+#define CRUNCH_MVDX6 48
+#define CRUNCH_MVDX7 56
+#define CRUNCH_MVDX8 64
+#define CRUNCH_MVDX9 72
+#define CRUNCH_MVDX10 80
+#define CRUNCH_MVDX11 88
+#define CRUNCH_MVDX12 96
+#define CRUNCH_MVDX13 104
+#define CRUNCH_MVDX14 112
+#define CRUNCH_MVDX15 120
+#define CRUNCH_MVAX0L 128
+#define CRUNCH_MVAX0M 132
+#define CRUNCH_MVAX0H 136
+#define CRUNCH_MVAX1L 140
+#define CRUNCH_MVAX1M 144
+#define CRUNCH_MVAX1H 148
+#define CRUNCH_MVAX2L 152
+#define CRUNCH_MVAX2M 156
+#define CRUNCH_MVAX2H 160
+#define CRUNCH_MVAX3L 164
+#define CRUNCH_MVAX3M 168
+#define CRUNCH_MVAX3H 172
+#define CRUNCH_DSPSC 176
+
+#define CRUNCH_SIZE 184
+
+ .text
+
+/*
+ * Lazy switching of crunch coprocessor context
+ *
+ * r10 = struct thread_info pointer
+ * r9 = ret_from_exception
+ * lr = undefined instr exit
+ *
+ * called from prefetch exception handler with interrupts disabled
+ */
+ENTRY(crunch_task_enable)
+ ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
+
+ ldr r1, [r8, #0x80]
+ tst r1, #0x00800000 @ access to crunch enabled?
+ movne pc, lr @ if so no business here
+ mov r3, #0xaa @ unlock syscon swlock
+ str r3, [r8, #0xc0]
+ orr r1, r1, #0x00800000 @ enable access to crunch
+ str r1, [r8, #0x80]
+
+ ldr r3, =crunch_owner
+ add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r2, [sp, #60] @ current task pc value
+ ldr r1, [r3] @ get current crunch owner
+ str r0, [r3] @ this task now owns crunch
+ sub r2, r2, #4 @ adjust pc back
+ str r2, [sp, #60]
+
+ ldr r2, [r8, #0x80]
+ mov r2, r2 @ flush out enable (@@@)
+
+ teq r1, #0 @ test for last ownership
+ mov lr, r9 @ normal exit from exception
+ beq crunch_load @ no owner, skip save
+
+crunch_save:
+ cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers
+ cfstr64 mvdx1, [r1, #CRUNCH_MVDX1]
+ cfstr64 mvdx2, [r1, #CRUNCH_MVDX2]
+ cfstr64 mvdx3, [r1, #CRUNCH_MVDX3]
+ cfstr64 mvdx4, [r1, #CRUNCH_MVDX4]
+ cfstr64 mvdx5, [r1, #CRUNCH_MVDX5]
+ cfstr64 mvdx6, [r1, #CRUNCH_MVDX6]
+ cfstr64 mvdx7, [r1, #CRUNCH_MVDX7]
+ cfstr64 mvdx8, [r1, #CRUNCH_MVDX8]
+ cfstr64 mvdx9, [r1, #CRUNCH_MVDX9]
+ cfstr64 mvdx10, [r1, #CRUNCH_MVDX10]
+ cfstr64 mvdx11, [r1, #CRUNCH_MVDX11]
+ cfstr64 mvdx12, [r1, #CRUNCH_MVDX12]
+ cfstr64 mvdx13, [r1, #CRUNCH_MVDX13]
+ cfstr64 mvdx14, [r1, #CRUNCH_MVDX14]
+ cfstr64 mvdx15, [r1, #CRUNCH_MVDX15]
+
+#ifdef __ARMEB__
+#error fix me for ARMEB
+#endif
+
+ cfmv32al mvfx0, mvax0 @ save 72b accumulators
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L]
+ cfmv32am mvfx0, mvax0
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M]
+ cfmv32ah mvfx0, mvax0
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H]
+ cfmv32al mvfx0, mvax1
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L]
+ cfmv32am mvfx0, mvax1
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M]
+ cfmv32ah mvfx0, mvax1
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H]
+ cfmv32al mvfx0, mvax2
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L]
+ cfmv32am mvfx0, mvax2
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M]
+ cfmv32ah mvfx0, mvax2
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H]
+ cfmv32al mvfx0, mvax3
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L]
+ cfmv32am mvfx0, mvax3
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M]
+ cfmv32ah mvfx0, mvax3
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H]
+
+ cfmv32sc mvdx0, dspsc @ save status word
+ cfstr64 mvdx0, [r1, #CRUNCH_DSPSC]
+
+ teq r0, #0 @ anything to load?
+ cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
+ moveq pc, lr
+
+crunch_load:
+ cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
+ cfmvsc32 dspsc, mvdx0
+
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators
+ cfmval32 mvax0, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M]
+ cfmvam32 mvax0, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H]
+ cfmvah32 mvax0, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L]
+ cfmval32 mvax1, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M]
+ cfmvam32 mvax1, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H]
+ cfmvah32 mvax1, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L]
+ cfmval32 mvax2, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M]
+ cfmvam32 mvax2, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H]
+ cfmvah32 mvax2, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L]
+ cfmval32 mvax3, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M]
+ cfmvam32 mvax3, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H]
+ cfmvah32 mvax3, mvfx0
+
+ cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers
+ cfldr64 mvdx1, [r0, #CRUNCH_MVDX1]
+ cfldr64 mvdx2, [r0, #CRUNCH_MVDX2]
+ cfldr64 mvdx3, [r0, #CRUNCH_MVDX3]
+ cfldr64 mvdx4, [r0, #CRUNCH_MVDX4]
+ cfldr64 mvdx5, [r0, #CRUNCH_MVDX5]
+ cfldr64 mvdx6, [r0, #CRUNCH_MVDX6]
+ cfldr64 mvdx7, [r0, #CRUNCH_MVDX7]
+ cfldr64 mvdx8, [r0, #CRUNCH_MVDX8]
+ cfldr64 mvdx9, [r0, #CRUNCH_MVDX9]
+ cfldr64 mvdx10, [r0, #CRUNCH_MVDX10]
+ cfldr64 mvdx11, [r0, #CRUNCH_MVDX11]
+ cfldr64 mvdx12, [r0, #CRUNCH_MVDX12]
+ cfldr64 mvdx13, [r0, #CRUNCH_MVDX13]
+ cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
+ cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
+
+ mov pc, lr
+
+/*
+ * Back up crunch regs to save area and disable access to them
+ * (mainly for gdb or sleep mode usage)
+ *
+ * r0 = struct thread_info pointer of target task or NULL for any
+ */
+ENTRY(crunch_task_disable)
+ stmfd sp!, {r4, r5, lr}
+
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
+
+ ldr r3, =crunch_owner
+ add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r1, [r3] @ get current crunch owner
+ teq r1, #0 @ any current owner?
+ beq 1f @ no: quit
+ teq r0, #0 @ any owner?
+ teqne r1, r2 @ or specified one?
+ bne 1f @ no: quit
+
+ ldr r5, [r4, #0x80] @ enable access to crunch
+ mov r2, #0xaa
+ str r2, [r4, #0xc0]
+ orr r5, r5, #0x00800000
+ str r5, [r4, #0x80]
+
+ mov r0, #0 @ nothing to load
+ str r0, [r3] @ no more current owner
+ ldr r2, [r4, #0x80] @ flush out enable (@@@)
+ mov r2, r2
+ bl crunch_save
+
+ mov r2, #0xaa @ disable access to crunch
+ str r2, [r4, #0xc0]
+ bic r5, r5, #0x00800000
+ str r5, [r4, #0x80]
+ ldr r5, [r4, #0x80] @ flush out enable (@@@)
+ mov r5, r5
+
+1: msr cpsr_c, ip @ restore interrupt mode
+ ldmfd sp!, {r4, r5, pc}
+
+/*
+ * Copy crunch state to given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to store crunch state
+ *
+ * this is called mainly in the creation of signal stack frames
+ */
+ENTRY(crunch_task_copy)
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r3, =crunch_owner
+ add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r3, [r3] @ get current crunch owner
+ teq r2, r3 @ does this task own it...
+ beq 1f
+
+ @ current crunch values are in the task save area
+ msr cpsr_c, ip @ restore interrupt mode
+ mov r0, r1
+ mov r1, r2
+ mov r2, #CRUNCH_SIZE
+ b memcpy
+
+1: @ this task owns crunch regs -- grab a copy from there
+ mov r0, #0 @ nothing to load
+ mov r3, lr @ preserve return address
+ bl crunch_save
+ msr cpsr_c, ip @ restore interrupt mode
+ mov pc, r3
+
+/*
+ * Restore crunch state from given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to get crunch state from
+ *
+ * this is used to restore crunch state when unwinding a signal stack frame
+ */
+ENTRY(crunch_task_restore)
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r3, =crunch_owner
+ add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r3, [r3] @ get current crunch owner
+ teq r2, r3 @ does this task own it...
+ beq 1f
+
+ @ this task doesn't own crunch regs -- use its save area
+ msr cpsr_c, ip @ restore interrupt mode
+ mov r0, r2
+ mov r2, #CRUNCH_SIZE
+ b memcpy
+
+1: @ this task owns crunch regs -- load them directly
+ mov r0, r1
+ mov r1, #0 @ nothing to save
+ mov r3, lr @ preserve return address
+ bl crunch_load
+ msr cpsr_c, ip @ restore interrupt mode
+ mov pc, r3
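For reference, the CRUNCH_* offsets above describe a 184-byte save area. A C struct with the same layout (illustration only; the real struct crunch_state is declared in a thread_info header that this diff does not touch):

```c
/* Layout implied by the CRUNCH_* offsets in crunch-bits.S; the real
 * struct crunch_state lives in a header outside this diff. */
struct crunch_state_sketch {
	unsigned int mvdx[16][2];	/* bytes   0-127: sixteen 64-bit MVDX registers */
	unsigned int mvax[4][3];	/* bytes 128-175: four 72-bit accumulators (L/M/H words) */
	unsigned int dspsc[2];		/* bytes 176-183: DSPSC status word, stored as 64 bits */
};					/* sizeof == 184 == CRUNCH_SIZE */
```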
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
new file mode 100644
index 00000000000..748175921f9
--- /dev/null
+++ b/arch/arm/kernel/crunch.c
@@ -0,0 +1,83 @@
+/*
+ * arch/arm/kernel/crunch.c
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/arch/ep93xx-regs.h>
+#include <asm/thread_notify.h>
+#include <asm/io.h>
+
+struct crunch_state *crunch_owner;
+
+void crunch_task_release(struct thread_info *thread)
+{
+ local_irq_disable();
+ if (crunch_owner == &thread->crunchstate)
+ crunch_owner = NULL;
+ local_irq_enable();
+}
+
+static int crunch_enabled(u32 devcfg)
+{
+ return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE);
+}
+
+static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+ struct thread_info *thread = (struct thread_info *)t;
+ struct crunch_state *crunch_state;
+ u32 devcfg;
+
+ crunch_state = &thread->crunchstate;
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+ memset(crunch_state, 0, sizeof(*crunch_state));
+
+ /*
+ * FALLTHROUGH: Ensure we don't try to overwrite our newly
+ * initialised state information on the first fault.
+ */
+
+ case THREAD_NOTIFY_RELEASE:
+ crunch_task_release(thread);
+ break;
+
+ case THREAD_NOTIFY_SWITCH:
+ devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG);
+ if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
+ devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
+ __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
+ __raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block crunch_notifier_block = {
+ .notifier_call = crunch_do,
+};
+
+static int __init crunch_init(void)
+{
+ thread_register_notifier(&crunch_notifier_block);
+
+ return 0;
+}
+
+late_initcall(crunch_init);
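crunch_do() is wired up through the thread_notify_head chain that the process.c hunk below creates. The thread_notify() calls seen there are presumably a thin wrapper around atomic_notifier_call_chain(), along these lines (an assumption; the wrapper would live in <asm/thread_notify.h>, which is outside this diffstat):

```c
/* Assumed shape of the thread_notify() helper used in process.c
 * below; the real declaration is in <asm/thread_notify.h>. */
static inline void thread_notify(unsigned long rc, struct thread_info *thread)
{
	atomic_notifier_call_chain(&thread_notify_head, rc, thread);
}
```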
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index ab8e600c18c..6423a38839b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -20,6 +20,7 @@
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
+#include <asm/thread_notify.h>
#include "entry-header.S"
@@ -491,9 +492,15 @@ call_fpe:
b do_fpe @ CP#1 (FPE)
b do_fpe @ CP#2 (FPE)
mov pc, lr @ CP#3
+#ifdef CONFIG_CRUNCH
+ b crunch_task_enable @ CP#4 (MaverickCrunch)
+ b crunch_task_enable @ CP#5 (MaverickCrunch)
+ b crunch_task_enable @ CP#6 (MaverickCrunch)
+#else
mov pc, lr @ CP#4
mov pc, lr @ CP#5
mov pc, lr @ CP#6
+#endif
mov pc, lr @ CP#7
mov pc, lr @ CP#8
mov pc, lr @ CP#9
@@ -560,10 +567,8 @@ ENTRY(__switch_to)
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
-#ifndef CONFIG_MMU
- add r2, r2, #TI_CPU_DOMAIN
-#else
- ldr r6, [r2, #TI_CPU_DOMAIN]!
+#ifdef CONFIG_MMU
+ ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
@@ -585,21 +590,20 @@ ENTRY(__switch_to)
#ifdef CONFIG_MMU
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
-#ifdef CONFIG_VFP
- @ Always disable VFP so we can lazily save/restore the old
- @ state. This occurs in the context of the previous thread.
- VFPFMRX r4, FPEXC
- bic r4, r4, #FPEXC_ENABLE
- VFPFMXR FPEXC, r4
-#endif
#if defined(CONFIG_IWMMXT)
bl iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
- add r4, r2, #40 @ cpu_context_save->extra
+ add r4, r2, #TI_CPU_DOMAIN + 40 @ cpu_context_save->extra
ldmib r4, {r4, r5}
mar acc0, r4, r5
#endif
- ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ mov r5, r0
+ add r4, r2, #TI_CPU_SAVE
+ ldr r0, =thread_notify_head
+ mov r1, #THREAD_NOTIFY_SWITCH
+ bl atomic_notifier_call_chain
+ mov r0, r5
+ ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
__INIT
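The rewritten __switch_to tail above replaces the hardcoded VFP disable with a generic notifier call: r0 (the previous task) is parked in r5, the THREAD_NOTIFY_SWITCH chain runs, and only then are the next task's registers loaded from TI_CPU_SAVE. In rough C terms (an ordering sketch only, not the generated code):

```c
/* Ordering sketch of the new __switch_to tail; illustration only,
 * the real implementation is the assembly above. */
static inline struct task_struct *
switch_to_tail(struct task_struct *prev, struct thread_info *next)
{
	/* notify registered coprocessor owners of the switch
	 * (in this patch: MaverickCrunch, via crunch_do()) */
	atomic_notifier_call_chain(&thread_notify_head,
				   THREAD_NOTIFY_SWITCH, next);
	/* then 'ldmia r4, {r4 - sl, fp, sp, pc}' restores the new
	 * context; r0 must still hold 'prev' when the new thread runs */
	return prev;
}
```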
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index dbcb11a31f7..75af6d6e2f2 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -271,7 +271,7 @@ ENTRY(sys_call_table)
@ r8 = syscall table
.type sys_syscall, #function
sys_syscall:
- eor scno, r0, #__NR_OABI_SYSCALL_BASE
+ bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
stmloia sp, {r5, r6} @ shuffle args
@@ -340,7 +340,7 @@ sys_mmap2:
streq r5, [sp, #4]
beq do_mmap2
mov r0, #-EINVAL
- RETINSTR(mov,pc, lr)
+ mov pc, lr
#else
str r5, [sp, #4]
b do_mmap2
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index adf62e5eaad..2af7e44218a 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -39,7 +39,7 @@
__INIT
.type stext, %function
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04f7344e356..330b9476c39 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,7 +71,7 @@
__INIT
.type stext, %function
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
@@ -104,7 +104,7 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2d5896b3618..ec20f8935e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -52,7 +52,7 @@
*/
#define MAX_IRQ_CNT 100000
-static int noirqdebug;
+static int noirqdebug __read_mostly;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);
@@ -81,7 +81,7 @@ irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
- irq_err_count += 1;
+ irq_err_count++;
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
@@ -342,10 +342,10 @@ __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
#ifdef CONFIG_NO_IDLE_HZ
if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
- write_seqlock(&xtime_lock);
+ spin_lock(&system_timer->dyn_tick->lock);
if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
system_timer->dyn_tick->handler(irq, 0, regs);
- write_sequnlock(&xtime_lock);
+ spin_unlock(&system_timer->dyn_tick->lock);
}
#endif
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 24c7b0477a0..a3bae95e536 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -273,7 +273,7 @@ ENTRY(iwmmxt_task_restore)
*
* r0 = previous task_struct pointer (must be preserved)
* r1 = previous thread_info pointer
- * r2 = next thread_info.cpu_domain pointer (must be preserved)
+ * r2 = next thread_info pointer (must be preserved)
*
* Called only from __switch_to with task preemption disabled.
* No need to care about preserving r4 and above.
@@ -285,7 +285,7 @@ ENTRY(iwmmxt_task_switch)
bne 1f @ yes: block them for next task
ldr r5, =concan_owner
- add r6, r2, #(TI_IWMMXT_STATE - TI_CPU_DOMAIN) @ get next task Concan save area
+ add r6, r2, #TI_IWMMXT_STATE @ get next task Concan save area
ldr r5, [r5] @ get current Concan owner
teq r5, r6 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7df6e1aaa32..e1c77ee885a 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -28,10 +28,12 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
+#include <linux/pm.h>
#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/thread_notify.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>
@@ -71,8 +73,36 @@ static int __init hlt_setup(char *__unused)
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
+void arm_machine_restart(char mode)
+{
+ /*
+ * Clean and disable cache, and turn off interrupts
+ */
+ cpu_proc_fin();
+
+ /*
+ * Tell the mm system that we are going to reboot -
+ * we may need it to insert some 1:1 mappings so that
+ * soft boot works.
+ */
+ setup_mm_for_reboot(mode);
+
+ /*
+ * Now call the architecture specific reboot code.
+ */
+ arch_reset(mode);
+
+ /*
+ * Whoops - the architecture was unable to reboot.
+ * Tell the user!
+ */
+ mdelay(1000);
+ printk("Reboot failed -- System halted\n");
+ while (1);
+}
+
/*
- * The following aren't currently used.
+ * Function pointers to optional machine specific functions
*/
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
@@ -80,6 +110,10 @@ EXPORT_SYMBOL(pm_idle);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
+void (*arm_pm_restart)(char str) = arm_machine_restart;
+EXPORT_SYMBOL_GPL(arm_pm_restart);
+
+
/*
* This is our default idle handler. We need to disable
* interrupts here to ensure we don't miss a wakeup call.
@@ -151,33 +185,9 @@ void machine_power_off(void)
pm_power_off();
}
-
void machine_restart(char * __unused)
{
- /*
- * Clean and disable cache, and turn off interrupts
- */
- cpu_proc_fin();
-
- /*
- * Tell the mm system that we are going to reboot -
- * we may need it to insert some 1:1 mappings so that
- * soft boot works.
- */
- setup_mm_for_reboot(reboot_mode);
-
- /*
- * Now call the architecture specific reboot code.
- */
- arch_reset(reboot_mode);
-
- /*
- * Whoops - the architecture was unable to reboot.
- * Tell the user!
- */
- mdelay(1000);
- printk("Reboot failed -- System halted\n");
- while (1);
+ arm_pm_restart(reboot_mode);
}
void __show_regs(struct pt_regs *regs)
@@ -329,13 +339,9 @@ void exit_thread(void)
{
}
-static void default_fp_init(union fp_state *fp)
-{
- memset(fp, 0, sizeof(union fp_state));
-}
+ATOMIC_NOTIFIER_HEAD(thread_notify_head);
-void (*fp_init)(union fp_state *) = default_fp_init;
-EXPORT_SYMBOL(fp_init);
+EXPORT_SYMBOL_GPL(thread_notify_head);
void flush_thread(void)
{
@@ -344,22 +350,21 @@ void flush_thread(void)
memset(thread->used_cp, 0, sizeof(thread->used_cp));
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+ memset(&thread->fpstate, 0, sizeof(union fp_state));
+
+ thread_notify(THREAD_NOTIFY_FLUSH, thread);
#if defined(CONFIG_IWMMXT)
iwmmxt_task_release(thread);
#endif
- fp_init(&thread->fpstate);
-#if defined(CONFIG_VFP)
- vfp_flush_thread(&thread->vfpstate);
-#endif
}
void release_thread(struct task_struct *dead_task)
{
-#if defined(CONFIG_VFP)
- vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
-#endif
+ struct thread_info *thread = task_thread_info(dead_task);
+
+ thread_notify(THREAD_NOTIFY_RELEASE, thread);
#if defined(CONFIG_IWMMXT)
- iwmmxt_task_release(task_thread_info(dead_task));
+ iwmmxt_task_release(thread);
#endif
}
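machine_restart() is now a one-line trampoline through arm_pm_restart, which defaults to arm_machine_restart() but can be overridden per machine. A minimal sketch of such an override (my_board_restart and my_board_init are hypothetical names):

```c
/* Hypothetical board override of the new arm_pm_restart hook. */
static void my_board_restart(char mode)
{
	/* board-specific reset (e.g. kick a watchdog register) here,
	 * then fall back to the generic soft-reboot path */
	arm_machine_restart(mode);
}

static int __init my_board_init(void)
{
	arm_pm_restart = my_board_restart;
	return 0;
}
arch_initcall(my_board_init);
```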
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index a1d1b2906e8..c40bdc77005 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -634,6 +634,32 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
#endif
+#ifdef CONFIG_CRUNCH
+/*
+ * Get the child Crunch state.
+ */
+static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+
+ crunch_task_disable(thread); /* force it to ram */
+ return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
+ ? -EFAULT : 0;
+}
+
+/*
+ * Set the child Crunch state.
+ */
+static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+
+ crunch_task_release(thread); /* force a reload */
+ return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
+ ? -EFAULT : 0;
+}
+#endif
+
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
unsigned long tmp;
@@ -765,6 +791,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
child->ptrace_message = data;
break;
+#ifdef CONFIG_CRUNCH
+ case PTRACE_GETCRUNCHREGS:
+ ret = ptrace_getcrunchregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETCRUNCHREGS:
+ ret = ptrace_setcrunchregs(child, (void __user *)data);
+ break;
+#endif
+
default:
ret = ptrace_request(child, request, addr, data);
break;
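From userspace, the new requests move CRUNCH_SIZE (184) bytes to or from the tracee's save area. A hedged sketch follows; the PTRACE_GETCRUNCHREGS request value comes from the ARM <asm/ptrace.h>, which is not part of this diff, so the fallback definition below is an assumption:

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <stdint.h>

#ifndef PTRACE_GETCRUNCHREGS
#define PTRACE_GETCRUNCHREGS 25	/* assumed value; see ARM <asm/ptrace.h> */
#endif

/* Read the tracee's 184-byte (CRUNCH_SIZE) Crunch save area. */
long read_crunch_state(pid_t pid, uint8_t state[184])
{
	return ptrace(PTRACE_GETCRUNCHREGS, pid, NULL, state);
}
```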
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9fc9af88c60..6bdf70def01 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -119,9 +119,24 @@ DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
* Standard memory resources
*/
static struct resource mem_res[] = {
- { "Video RAM", 0, 0, IORESOURCE_MEM },
- { "Kernel text", 0, 0, IORESOURCE_MEM },
- { "Kernel data", 0, 0, IORESOURCE_MEM }
+ {
+ .name = "Video RAM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel text",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ }
};
#define video_ram mem_res[0]
@@ -129,9 +144,24 @@ static struct resource mem_res[] = {
#define kernel_data mem_res[2]
static struct resource io_res[] = {
- { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
- { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
- { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
+ {
+ .name = "reserved",
+ .start = 0x3bc,
+ .end = 0x3be,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "reserved",
+ .start = 0x378,
+ .end = 0x37f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "reserved",
+ .start = 0x278,
+ .end = 0x27f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ }
};
#define lp0 io_res[0]
@@ -808,7 +838,7 @@ static int __init topology_init(void)
int cpu;
for_each_possible_cpu(cpu)
- register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);
+ register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
return 0;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a0cd0a90a10..83a8d3c95eb 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -132,18 +132,38 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
return ret;
}
-#ifdef CONFIG_IWMMXT
+#ifdef CONFIG_CRUNCH
+static int preserve_crunch_context(struct crunch_sigframe *frame)
+{
+ char kbuf[sizeof(*frame) + 8];
+ struct crunch_sigframe *kframe;
-/* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */
-#define IWMMXT_STORAGE_SIZE (0x98 + 8)
-#define IWMMXT_MAGIC0 0x12ef842a
-#define IWMMXT_MAGIC1 0x1c07ca71
+ /* the crunch context must be 64 bit aligned */
+ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+ kframe->magic = CRUNCH_MAGIC;
+ kframe->size = CRUNCH_STORAGE_SIZE;
+ crunch_task_copy(current_thread_info(), &kframe->storage);
+ return __copy_to_user(frame, kframe, sizeof(*frame));
+}
-struct iwmmxt_sigframe {
- unsigned long magic0;
- unsigned long magic1;
- unsigned long storage[0x98/4];
-};
+static int restore_crunch_context(struct crunch_sigframe *frame)
+{
+ char kbuf[sizeof(*frame) + 8];
+ struct crunch_sigframe *kframe;
+
+ /* the crunch context must be 64 bit aligned */
+ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+ if (__copy_from_user(kframe, frame, sizeof(*frame)))
+ return -1;
+ if (kframe->magic != CRUNCH_MAGIC ||
+ kframe->size != CRUNCH_STORAGE_SIZE)
+ return -1;
+ crunch_task_restore(current_thread_info(), &kframe->storage);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_IWMMXT
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
@@ -152,8 +172,8 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
/* the iWMMXt context must be 64 bit aligned */
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
- kframe->magic0 = IWMMXT_MAGIC0;
- kframe->magic1 = IWMMXT_MAGIC1;
+ kframe->magic = IWMMXT_MAGIC;
+ kframe->size = IWMMXT_STORAGE_SIZE;
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
return __copy_to_user(frame, kframe, sizeof(*frame));
}
@@ -167,8 +187,8 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
if (__copy_from_user(kframe, frame, sizeof(*frame)))
return -1;
- if (kframe->magic0 != IWMMXT_MAGIC0 ||
- kframe->magic1 != IWMMXT_MAGIC1)
+ if (kframe->magic != IWMMXT_MAGIC ||
+ kframe->size != IWMMXT_STORAGE_SIZE)
return -1;
iwmmxt_task_restore(current_thread_info(), &kframe->storage);
return 0;
@@ -177,70 +197,65 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
#endif
/*
- * Auxiliary signal frame. This saves stuff like FP state.
- * The layout of this structure is not part of the user ABI.
- */
-struct aux_sigframe {
-#ifdef CONFIG_IWMMXT
- struct iwmmxt_sigframe iwmmxt;
-#endif
-#ifdef CONFIG_VFP
- union vfp_state vfp;
-#endif
-};
-
-/*
* Do a signal return; undo the signal stack. These are aligned to 64-bit.
*/
struct sigframe {
- struct sigcontext sc;
- unsigned long extramask[_NSIG_WORDS-1];
+ struct ucontext uc;
unsigned long retcode[2];
- struct aux_sigframe aux __attribute__((aligned(8)));
};
struct rt_sigframe {
- struct siginfo __user *pinfo;
- void __user *puc;
struct siginfo info;
- struct ucontext uc;
- unsigned long retcode[2];
- struct aux_sigframe aux __attribute__((aligned(8)));
+ struct sigframe sig;
};
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
- struct aux_sigframe __user *aux)
+static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
- int err = 0;
+ struct aux_sigframe __user *aux;
+ sigset_t set;
+ int err;
- __get_user_error(regs->ARM_r0, &sc->arm_r0, err);
- __get_user_error(regs->ARM_r1, &sc->arm_r1, err);
- __get_user_error(regs->ARM_r2, &sc->arm_r2, err);
- __get_user_error(regs->ARM_r3, &sc->arm_r3, err);
- __get_user_error(regs->ARM_r4, &sc->arm_r4, err);
- __get_user_error(regs->ARM_r5, &sc->arm_r5, err);
- __get_user_error(regs->ARM_r6, &sc->arm_r6, err);
- __get_user_error(regs->ARM_r7, &sc->arm_r7, err);
- __get_user_error(regs->ARM_r8, &sc->arm_r8, err);
- __get_user_error(regs->ARM_r9, &sc->arm_r9, err);
- __get_user_error(regs->ARM_r10, &sc->arm_r10, err);
- __get_user_error(regs->ARM_fp, &sc->arm_fp, err);
- __get_user_error(regs->ARM_ip, &sc->arm_ip, err);
- __get_user_error(regs->ARM_sp, &sc->arm_sp, err);
- __get_user_error(regs->ARM_lr, &sc->arm_lr, err);
- __get_user_error(regs->ARM_pc, &sc->arm_pc, err);
- __get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (err == 0) {
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
+ __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+ __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+ __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+ __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+ __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+ __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+ __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+ __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+ __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+ __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+ __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+ __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+ __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+ __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+ __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+ __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+ __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
err |= !valid_user_regs(regs);
+ aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+ if (err == 0)
+ err |= restore_crunch_context(&aux->crunch);
+#endif
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
// if (err == 0)
-// err |= vfp_restore_state(&aux->vfp);
+// err |= vfp_restore_state(&sf->aux.vfp);
#endif
return err;
@@ -249,7 +264,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
struct sigframe __user *frame;
- sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -266,19 +280,8 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
goto badframe;
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, &frame->sc, &frame->aux))
+ if (restore_sigframe(regs, frame))
goto badframe;
/* Send SIGTRAP if we're single-stepping */
@@ -297,7 +300,6 @@ badframe:
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
- sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -314,19 +316,11 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux))
+ if (restore_sigframe(regs, &frame->sig))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
+ if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
goto badframe;
/* Send SIGTRAP if we're single-stepping */
@@ -343,42 +337,50 @@ badframe:
}
static int
-setup_sigcontext(struct sigcontext __user *sc, struct aux_sigframe __user *aux,
- struct pt_regs *regs, unsigned long mask)
+setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
+ struct aux_sigframe __user *aux;
int err = 0;
- __put_user_error(regs->ARM_r0, &sc->arm_r0, err);
- __put_user_error(regs->ARM_r1, &sc->arm_r1, err);
- __put_user_error(regs->ARM_r2, &sc->arm_r2, err);
- __put_user_error(regs->ARM_r3, &sc->arm_r3, err);
- __put_user_error(regs->ARM_r4, &sc->arm_r4, err);
- __put_user_error(regs->ARM_r5, &sc->arm_r5, err);
- __put_user_error(regs->ARM_r6, &sc->arm_r6, err);
- __put_user_error(regs->ARM_r7, &sc->arm_r7, err);
- __put_user_error(regs->ARM_r8, &sc->arm_r8, err);
- __put_user_error(regs->ARM_r9, &sc->arm_r9, err);
- __put_user_error(regs->ARM_r10, &sc->arm_r10, err);
- __put_user_error(regs->ARM_fp, &sc->arm_fp, err);
- __put_user_error(regs->ARM_ip, &sc->arm_ip, err);
- __put_user_error(regs->ARM_sp, &sc->arm_sp, err);
- __put_user_error(regs->ARM_lr, &sc->arm_lr, err);
- __put_user_error(regs->ARM_pc, &sc->arm_pc, err);
- __put_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
-
- __put_user_error(current->thread.trap_no, &sc->trap_no, err);
- __put_user_error(current->thread.error_code, &sc->error_code, err);
- __put_user_error(current->thread.address, &sc->fault_address, err);
- __put_user_error(mask, &sc->oldmask, err);
-
+ __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+ __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+ __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+ __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+ __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+ __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+ __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+ __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+ __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+ __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+ __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+ __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+ __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+ __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+ __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+ __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+ __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+ __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+ __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+ __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+ __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+ aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+ if (err == 0)
+ err |= preserve_crunch_context(&aux->crunch);
+#endif
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
// if (err == 0)
-// err |= vfp_save_state(&aux->vfp);
+// err |= vfp_save_state(&sf->aux.vfp);
#endif
+ __put_user_error(0, &aux->end_magic, err);
return err;
}
@@ -487,13 +489,12 @@ setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *reg
if (!frame)
return 1;
- err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]);
-
- if (_NSIG_WORDS > 1) {
- err |= __copy_to_user(frame->extramask, &set->sig[1],
- sizeof(frame->extramask));
- }
+ /*
+ * Set uc.uc_flags to a value which sc.trap_no would never have.
+ */
+ __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+ err |= setup_sigframe(frame, regs, set);
if (err == 0)
err = setup_return(regs, ka, frame->retcode, frame, usig);
@@ -511,25 +512,20 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
if (!frame)
return 1;
- __put_user_error(&frame->info, &frame->pinfo, err);
- __put_user_error(&frame->uc, &frame->puc, err);
err |= copy_siginfo_to_user(&frame->info, info);
- __put_user_error(0, &frame->uc.uc_flags, err);
- __put_user_error(NULL, &frame->uc.uc_link, err);
+ __put_user_error(0, &frame->sig.uc.uc_flags, err);
+ __put_user_error(NULL, &frame->sig.uc.uc_link, err);
memset(&stack, 0, sizeof(stack));
stack.ss_sp = (void __user *)current->sas_ss_sp;
stack.ss_flags = sas_ss_flags(regs->ARM_sp);
stack.ss_size = current->sas_ss_size;
- err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
-
- err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux,
- regs, set->sig[0]);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
+ err |= setup_sigframe(&frame->sig, regs, set);
if (err == 0)
- err = setup_return(regs, ka, frame->retcode, frame, usig);
+ err = setup_return(regs, ka, frame->sig.retcode, frame, usig);
if (err == 0) {
/*
@@ -538,7 +534,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
* -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
*/
regs->ARM_r1 = (unsigned long)&frame->info;
- regs->ARM_r2 = (unsigned long)&frame->uc;
+ regs->ARM_r2 = (unsigned long)&frame->sig.uc;
}
return err;
@@ -665,17 +661,33 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
if (syscall) {
if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
if (thumb_mode(regs)) {
- regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
regs->ARM_pc -= 2;
} else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+ regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_pc -= 4;
+#else
u32 __user *usp;
+ u32 swival = __NR_restart_syscall;
regs->ARM_sp -= 12;
usp = (u32 __user *)regs->ARM_sp;
+ /*
+ * Either we support OABI only, or we have
+ * EABI with the OABI compat layer enabled.
+ * In the latter case we don't know if user
+ * space is EABI or not, and if not we must
+ * not clobber r7. Always using the OABI
+ * syscall solves that issue and works for
+ * all those cases.
+ */
+ swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;
+
put_user(regs->ARM_pc, &usp[0]);
/* swi __NR_restart_syscall */
- put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
+ put_user(0xef000000 | swival, &usp[1]);
/* ldr pc, [sp], #12 */
put_user(0xe49df00c, &usp[2]);
@@ -683,6 +695,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
(unsigned long)(usp + 3));
regs->ARM_pc = regs->ARM_sp + 4;
+#endif
}
}
if (regs->ARM_r0 == -ERESTARTNOHAND ||
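With this change the coprocessor context moves out of a fixed aux_sigframe slot and into uc.uc_regspace as self-describing records: each block starts with a 32-bit magic and a 32-bit size that includes the 8-byte header (cf. IWMMXT_STORAGE_SIZE = 0x98 + 8 above), and a zero end_magic terminates the list. A hedged userspace sketch of walking those records from a signal handler, with the record layout inferred from the frame-building code in this hunk:

```c
#include <stddef.h>
#include <stdint.h>

/* Walk uc.uc_regspace looking for a coprocessor record by magic;
 * layout (magic, size-including-header, payload, ... , 0) is an
 * assumption based on the setup_sigframe() code above. */
static const uint32_t *find_record(const uint32_t *rs, uint32_t wanted)
{
	while (rs[0] != 0) {			/* 0 == end_magic */
		if (rs[0] == wanted)
			return rs;		/* points at magic/size header */
		rs += rs[1] / sizeof(uint32_t);	/* advance by 'size' bytes */
	}
	return NULL;
}
```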
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index d6bd435a685..9c12d4fefbd 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -379,7 +379,7 @@ static int timer_dyn_tick_enable(void)
int ret = -ENODEV;
if (dyn_tick) {
- write_seqlock_irqsave(&xtime_lock, flags);
+ spin_lock_irqsave(&dyn_tick->lock, flags);
ret = 0;
if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
ret = dyn_tick->enable();
@@ -387,7 +387,7 @@ static int timer_dyn_tick_enable(void)
if (ret == 0)
dyn_tick->state |= DYN_TICK_ENABLED;
}
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
}
return ret;
@@ -400,7 +400,7 @@ static int timer_dyn_tick_disable(void)
int ret = -ENODEV;
if (dyn_tick) {
- write_seqlock_irqsave(&xtime_lock, flags);
+ spin_lock_irqsave(&dyn_tick->lock, flags);
ret = 0;
if (dyn_tick->state & DYN_TICK_ENABLED) {
ret = dyn_tick->disable();
@@ -408,7 +408,7 @@ static int timer_dyn_tick_disable(void)
if (ret == 0)
dyn_tick->state &= ~DYN_TICK_ENABLED;
}
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
}
return ret;
@@ -422,15 +422,20 @@ static int timer_dyn_tick_disable(void)
void timer_dyn_reprogram(void)
{
struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
- unsigned long next, seq;
+ unsigned long next, seq, flags;
- if (dyn_tick && (dyn_tick->state & DYN_TICK_ENABLED)) {
+ if (!dyn_tick)
+ return;
+
+ spin_lock_irqsave(&dyn_tick->lock, flags);
+ if (dyn_tick->state & DYN_TICK_ENABLED) {
next = next_timer_interrupt();
do {
seq = read_seqbegin(&xtime_lock);
- dyn_tick->reprogram(next_timer_interrupt() - jiffies);
+ dyn_tick->reprogram(next - jiffies);
} while (read_seqretry(&xtime_lock, seq));
}
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
}
static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
@@ -499,5 +504,10 @@ void __init time_init(void)
if (system_timer->offset == NULL)
system_timer->offset = dummy_gettimeoffset;
system_timer->init();
+
+#ifdef CONFIG_NO_IDLE_HZ
+ if (system_timer->dyn_tick)
+ system_timer->dyn_tick->lock = SPIN_LOCK_UNLOCKED;
+#endif
}
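The dyn_tick hunks drop xtime_lock in favour of a dedicated per-timer spinlock, initialised at the end of time_init(). The fields used above imply roughly this shape for struct dyn_tick_timer (the real definition, with the new lock member, lives in <asm/mach/time.h> and is therefore outside this arch/arm/kernel-only diffstat):

```c
/* Shape of struct dyn_tick_timer implied by its uses above; the
 * real definition is in <asm/mach/time.h>, not shown in this diff. */
struct dyn_tick_timer_sketch {
	spinlock_t	lock;		/* new: protects 'state' and reprogramming */
	unsigned int	state;		/* DYN_TICK_ENABLED, ... */
	int		(*enable)(void);
	int		(*disable)(void);
	void		(*reprogram)(unsigned long ticks);
	int		(*handler)(int irq, void *dev_id, struct pt_regs *regs);
					/* returns irqreturn_t in the kernel */
};
```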
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b254e88595..2df9688a702 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -80,6 +80,10 @@ SECTIONS
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
+#ifndef CONFIG_MMU
+ *(.fixup)
+ *(__ex_table)
+#endif
}
.text : { /* Real text segment */
@@ -87,7 +91,9 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+#ifdef CONFIG_MMU
*(.fixup)
+#endif
*(.gnu.warning)
*(.rodata)
*(.rodata.*)
@@ -142,7 +148,9 @@ SECTIONS
*/
. = ALIGN(32);
__start___ex_table = .;
+#ifdef CONFIG_MMU
*(__ex_table)
+#endif
__stop___ex_table = .;
/*