Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile                                      |   30
-rw-r--r--  arch/sh/kernel/Makefile_32                                   |   26
-rw-r--r--  arch/sh/kernel/Makefile_64                                   |   22
-rw-r--r--  arch/sh/kernel/cpu/Makefile                                  |    6
-rw-r--r--  arch/sh/kernel/cpu/init.c                                    |   74
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile                              |    4
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c                            |  257
-rw-r--r--  arch/sh/kernel/cpu/irq/intc.c                                |   31
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S                               |   19
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c                        |    2
-rw-r--r--  arch/sh/kernel/cpu/sh2a/Makefile                             |    4
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c                       |   89
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c                                |  633
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c                              |   22
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7203.c                       |  319
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c                       |    2
-rw-r--r--  arch/sh/kernel/cpu/sh3/Makefile                              |    2
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7712.c                        |   71
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S                               |   24
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S                                  |    2
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c                               |    9
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c                        |   10
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c                        |   11
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c                        |   16
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7720.c                        |   78
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile                              |    2
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c                                 |  537
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c                               |    2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c                        |   18
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c                        |   13
-rw-r--r--  arch/sh/kernel/cpu/sh4/softfloat.c                           |  892
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c                                  |   13
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile                             |    2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7763.c                       |  126
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c                       |   10
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c                       |  390
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c                       |   13
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c                       |   19
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c                         |   14
-rw-r--r--  arch/sh/kernel/cpu/sh5/Makefile                              |    7
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S                               | 2101
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c                                 |  166
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c                               |   76
-rw-r--r--  arch/sh/kernel/cpu/sh5/switchto.S                            |  198
-rw-r--r--  arch/sh/kernel/cpu/sh5/unwind.c                              |  326
-rw-r--r--  arch/sh/kernel/dump_task.c                                   |   31
-rw-r--r--  arch/sh/kernel/early_printk.c                                |   12
-rw-r--r--  arch/sh/kernel/entry-common.S                                |   27
-rw-r--r--  arch/sh/kernel/head_32.S (renamed from arch/sh/kernel/head.S) |    6
-rw-r--r--  arch/sh/kernel/head_64.S                                     |  356
-rw-r--r--  arch/sh/kernel/init_task.c                                   |    4
-rw-r--r--  arch/sh/kernel/io.c                                          |   67
-rw-r--r--  arch/sh/kernel/module.c                                      |   62
-rw-r--r--  arch/sh/kernel/process_32.c (renamed from arch/sh/kernel/process.c) |   93
-rw-r--r--  arch/sh/kernel/process_64.c                                  |  701
-rw-r--r--  arch/sh/kernel/ptrace_32.c (renamed from arch/sh/kernel/ptrace.c) |   21
-rw-r--r--  arch/sh/kernel/ptrace_64.c                                   |  341
-rw-r--r--  arch/sh/kernel/setup.c                                       |   39
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c (renamed from arch/sh/kernel/sh_ksyms.c) |    0
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c                                 |   55
-rw-r--r--  arch/sh/kernel/signal_32.c (renamed from arch/sh/kernel/signal.c) |   18
-rw-r--r--  arch/sh/kernel/signal_64.c                                   |  751
-rw-r--r--  arch/sh/kernel/sys_sh.c                                      |  100
-rw-r--r--  arch/sh/kernel/sys_sh32.c                                    |   84
-rw-r--r--  arch/sh/kernel/sys_sh64.c                                    |   66
-rw-r--r--  arch/sh/kernel/syscalls_32.S (renamed from arch/sh/kernel/syscalls.S) |    0
-rw-r--r--  arch/sh/kernel/syscalls_64.S                                 |  381
-rw-r--r--  arch/sh/kernel/time_32.c (renamed from arch/sh/kernel/time.c) |    2
-rw-r--r--  arch/sh/kernel/time_64.c                                     |  519
-rw-r--r--  arch/sh/kernel/timers/timer-cmt.c                            |    4
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c                            |    1
-rw-r--r--  arch/sh/kernel/traps.c                                       |  949
-rw-r--r--  arch/sh/kernel/traps_32.c                                    |  919
-rw-r--r--  arch/sh/kernel/traps_64.c                                    |  975
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S                                 |  139
-rw-r--r--  arch/sh/kernel/vmlinux_32.lds.S                              |  152
-rw-r--r--  arch/sh/kernel/vmlinux_64.lds.S                              |  164
77 files changed, 11960 insertions(+), 1767 deletions(-)
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 4b81d9c47b0..349d833deab 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -1,25 +1,5 @@
-#
-# Makefile for the Linux/SuperH kernel.
-#
-
-extra-y := head.o init_task.o vmlinux.lds
-
-obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process.o ptrace.o \
- semaphore.o setup.o signal.o sys_sh.o syscalls.o \
- time.o topology.o traps.o
-
-obj-y += cpu/ timers/
-obj-$(CONFIG_VSYSCALL) += vsyscall/
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
-obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o
-obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
-obj-$(CONFIG_MODULES) += sh_ksyms.o module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
-
-EXTRA_CFLAGS += -Werror
+ifeq ($(CONFIG_SUPERH32),y)
+include ${srctree}/arch/sh/kernel/Makefile_32
+else
+include ${srctree}/arch/sh/kernel/Makefile_64
+endif
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
new file mode 100644
index 00000000000..c8928983105
--- /dev/null
+++ b/arch/sh/kernel/Makefile_32
@@ -0,0 +1,26 @@
+#
+# Makefile for the Linux/SuperH kernel.
+#
+
+extra-y := head_32.o init_task.o vmlinux.lds
+
+obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
+ ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
+ syscalls_32.o time_32.o topology.o traps.o traps_32.o
+
+obj-y += cpu/ timers/
+obj-$(CONFIG_VSYSCALL) += vsyscall/
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
+obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o
+obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
new file mode 100644
index 00000000000..1ef21cc087f
--- /dev/null
+++ b/arch/sh/kernel/Makefile_64
@@ -0,0 +1,22 @@
+extra-y := head_64.o init_task.o vmlinux.lds
+
+obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
+ ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
+ syscalls_64.o time_64.o topology.o traps.o traps_64.o
+
+obj-y += cpu/ timers/
+obj-$(CONFIG_VSYSCALL) += vsyscall/
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
+obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o
+obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_MODULES) += sh_ksyms_64.o module.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d055a3ea6b4..f471d242774 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -6,8 +6,14 @@ obj-$(CONFIG_CPU_SH2) = sh2/
obj-$(CONFIG_CPU_SH2A) = sh2a/
obj-$(CONFIG_CPU_SH3) = sh3/
obj-$(CONFIG_CPU_SH4) = sh4/
+obj-$(CONFIG_CPU_SH5) = sh5/
+
+# Special cases for family ancestry.
+
obj-$(CONFIG_CPU_SH4A) += sh4a/
+# Common interfaces.
+
obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index c217c4bf008..80a31329ead 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -20,9 +21,12 @@
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
+#include <asm/elf.h>
#include <asm/io.h>
-#include <asm/ubc.h>
#include <asm/smp.h>
+#ifdef CONFIG_SUPERH32
+#include <asm/ubc.h>
+#endif
/*
* Generic wrapper for command line arguments to disable on-chip
@@ -61,25 +65,12 @@ static void __init speculative_execution_init(void)
/*
* Generic first-level cache init
*/
-static void __init cache_init(void)
+#ifdef CONFIG_SUPERH32
+static void __uses_jump_to_uncached cache_init(void)
{
unsigned long ccr, flags;
- /* First setup the rest of the I-cache info */
- current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
- current_cpu_data.icache.linesz;
-
- current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
- current_cpu_data.icache.linesz;
-
- /* And the D-cache too */
- current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
- current_cpu_data.dcache.linesz;
-
- current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
- current_cpu_data.dcache.linesz;
-
- jump_to_P2();
+ jump_to_uncached();
ccr = ctrl_inl(CCR);
/*
@@ -156,7 +147,31 @@ static void __init cache_init(void)
#endif
ctrl_outl(flags, CCR);
- back_to_P1();
+ back_to_cached();
+}
+#else
+#define cache_init() do { } while (0)
+#endif
+
+#define CSHAPE(totalsize, linesize, assoc) \
+ ((totalsize & ~0xff) | (linesize << 4) | assoc)
+
+#define CACHE_DESC_SHAPE(desc) \
+ CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
+
+static void detect_cache_shape(void)
+{
+ l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
+
+ if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
+ l1i_cache_shape = l1d_cache_shape;
+ else
+ l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
+
+ if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
+ l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
+ else
+ l2_cache_shape = -1; /* No S-cache */
}
#ifdef CONFIG_SH_DSP
@@ -228,14 +243,32 @@ asmlinkage void __cpuinit sh_cpu_init(void)
if (current_cpu_data.type == CPU_SH_NONE)
panic("Unknown CPU");
+ /* First setup the rest of the I-cache info */
+ current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
+ current_cpu_data.icache.linesz;
+
+ current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
+ current_cpu_data.icache.linesz;
+
+ /* And the D-cache too */
+ current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
+ current_cpu_data.dcache.linesz;
+
+ current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
+ current_cpu_data.dcache.linesz;
+
/* Init the cache */
cache_init();
- if (raw_smp_processor_id() == 0)
+ if (raw_smp_processor_id() == 0) {
shm_align_mask = max_t(unsigned long,
current_cpu_data.dcache.way_size - 1,
PAGE_SIZE - 1);
+ /* Boot CPU sets the cache shape */
+ detect_cache_shape();
+ }
+
/* Disable the FPU */
if (fpu_disabled) {
printk("FPU Disabled\n");
@@ -273,7 +306,10 @@ asmlinkage void __cpuinit sh_cpu_init(void)
* like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
* we wake it up and hope that all is well.
*/
+#ifdef CONFIG_SUPERH32
if (raw_smp_processor_id() == 0)
ubc_wakeup();
+#endif
+
speculative_execution_init();
}
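
A quick illustration of the cache-shape encoding introduced above (not part of the patch). CSHAPE() packs the total size into the high bits, log2 of the line size into bits 4-7, and the associativity into bits 0-3; the resulting l1d/l1i/l2 words are what the boot CPU records for userspace (on SH, presumably via the ELF auxiliary vector, given the new <asm/elf.h> include). A minimal host-side sketch, assuming a 32KB 4-way cache with 32-byte lines:

#include <stdio.h>

#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

int main(void)
{
	unsigned int shape = CSHAPE(32 * 1024, 5, 4);	/* 32KB, 2^5-byte lines, 4-way */

	printf("total %u bytes, line %u bytes, %u-way\n",
	       shape & ~0xffu, 1u << ((shape >> 4) & 0xf), shape & 0xf);
	return 0;
}

This prints "total 32768 bytes, line 32 bytes, 4-way", i.e. the shape word 0x8054 round-trips back to the three cache parameters.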
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 8da8e178f09..cc1836e47a5 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,7 +1,9 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
-obj-y += imask.o intc.o
+obj-y += intc.o
+obj-$(CONFIG_SUPERH32) += imask.o
+obj-$(CONFIG_CPU_SH5) += intc-sh5.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
new file mode 100644
index 00000000000..43ee7a9a4f0
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -0,0 +1,257 @@
+/*
+ * arch/sh/kernel/cpu/irq/intc-sh5.c
+ *
+ * Interrupt Controller support for SH5 INTC.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * Per-interrupt selective. IRLM=0 (fixed priority) is not
+ * supported, as it is useless without a cascaded interrupt
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/cpu/irq.h>
+#include <asm/page.h>
+
+/*
+ * Maybe the generic Peripheral block could move to a more
+ * generic include file. INTC Block will be defined here
+ * and only here to make INTC self-contained in a single
+ * file.
+ */
+#define INTC_BLOCK_OFFSET 0x01000000
+
+/* Base */
+#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
+ INTC_BLOCK_OFFSET
+
+/* Address */
+#define INTC_ICR_SET (intc_virt + 0x0)
+#define INTC_ICR_CLEAR (intc_virt + 0x8)
+#define INTC_INTPRI_0 (intc_virt + 0x10)
+#define INTC_INTSRC_0 (intc_virt + 0x50)
+#define INTC_INTSRC_1 (intc_virt + 0x58)
+#define INTC_INTREQ_0 (intc_virt + 0x60)
+#define INTC_INTREQ_1 (intc_virt + 0x68)
+#define INTC_INTENB_0 (intc_virt + 0x70)
+#define INTC_INTENB_1 (intc_virt + 0x78)
+#define INTC_INTDSB_0 (intc_virt + 0x80)
+#define INTC_INTDSB_1 (intc_virt + 0x88)
+
+#define INTC_ICR_IRLM 0x1
+#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
+#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
+
+
+/*
+ * Mapper between the vector ordinal and the IRQ number
+ * passed to kernel/device drivers.
+ */
+int intc_evt_to_irq[(0xE20/0x20)+1] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
+ 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
+ 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
+ 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
+ -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
+ -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
+ 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
+ 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
+ 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
+ -1, -1 /* 0xE00 - 0xE20 */
+};
+
+/*
+ * Opposite mapper.
+ */
+static int IRQ_to_vectorN[NR_INTC_IRQS] = {
+ 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
+ -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
+ 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
+ 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
+ -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
+
+};
+
+static unsigned long intc_virt;
+
+static unsigned int startup_intc_irq(unsigned int irq);
+static void shutdown_intc_irq(unsigned int irq);
+static void enable_intc_irq(unsigned int irq);
+static void disable_intc_irq(unsigned int irq);
+static void mask_and_ack_intc(unsigned int);
+static void end_intc_irq(unsigned int irq);
+
+static struct hw_interrupt_type intc_irq_type = {
+ .typename = "INTC",
+ .startup = startup_intc_irq,
+ .shutdown = shutdown_intc_irq,
+ .enable = enable_intc_irq,
+ .disable = disable_intc_irq,
+ .ack = mask_and_ack_intc,
+ .end = end_intc_irq
+};
+
+static int irlm; /* IRL mode */
+
+static unsigned int startup_intc_irq(unsigned int irq)
+{
+ enable_intc_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void shutdown_intc_irq(unsigned int irq)
+{
+ disable_intc_irq(irq);
+}
+
+static void enable_intc_irq(unsigned int irq)
+{
+ unsigned long reg;
+ unsigned long bitmask;
+
+ if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
+ printk("Trying to use straight IRL0-3 with an encoding platform.\n");
+
+ if (irq < 32) {
+ reg = INTC_INTENB_0;
+ bitmask = 1 << irq;
+ } else {
+ reg = INTC_INTENB_1;
+ bitmask = 1 << (irq - 32);
+ }
+
+ ctrl_outl(bitmask, reg);
+}
+
+static void disable_intc_irq(unsigned int irq)
+{
+ unsigned long reg;
+ unsigned long bitmask;
+
+ if (irq < 32) {
+ reg = INTC_INTDSB_0;
+ bitmask = 1 << irq;
+ } else {
+ reg = INTC_INTDSB_1;
+ bitmask = 1 << (irq - 32);
+ }
+
+ ctrl_outl(bitmask, reg);
+}
+
+static void mask_and_ack_intc(unsigned int irq)
+{
+ disable_intc_irq(irq);
+}
+
+static void end_intc_irq(unsigned int irq)
+{
+ enable_intc_irq(irq);
+}
+
+/* For future use, if we ever support IRLM=0 */
+void make_intc_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_desc[irq].chip = &intc_irq_type;
+ disable_intc_irq(irq);
+}
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+int intc_irq_describe(char* p, int irq)
+{
+ if (irq < NR_INTC_IRQS)
+ return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
+ else
+ return 0;
+}
+#endif
+
+void __init plat_irq_setup(void)
+{
+ unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
+ unsigned long reg;
+ unsigned long data;
+ int i;
+
+ intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
+ if (!intc_virt) {
+ panic("Unable to remap INTC\n");
+ }
+
+
+ /* Set default: per-line enable/disable, priority driven ack/eoi */
+ for (i = 0; i < NR_INTC_IRQS; i++) {
+ if (platform_int_priority[i] != NO_PRIORITY) {
+ irq_desc[i].chip = &intc_irq_type;
+ }
+ }
+
+
+ /* Disable all interrupts and set all priorities to 0 to avoid trouble */
+ ctrl_outl(-1, INTC_INTDSB_0);
+ ctrl_outl(-1, INTC_INTDSB_1);
+
+ for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
+ ctrl_outl( NO_PRIORITY, reg);
+
+
+ /* Set IRLM */
+ /* If all the priorities are set to 'no priority', then
+ * assume we are using encoded mode.
+ */
+ irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] +
+ platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
+
+ if (irlm == NO_PRIORITY) {
+ /* IRLM = 0 */
+ reg = INTC_ICR_CLEAR;
+ i = IRQ_INTA;
+ printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
+ } else {
+ /* IRLM = 1 */
+ reg = INTC_ICR_SET;
+ i = IRQ_IRL0;
+ }
+ ctrl_outl(INTC_ICR_IRLM, reg);
+
+ /* Set interrupt priorities according to platform description */
+ for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
+ data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
+ if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
+ /* Every eighth priority (i % 8 == 7), write the register out */
+ ctrl_outl(data, reg);
+ data = 0;
+ reg += 8;
+ }
+ }
+
+ /*
+ * And now let interrupts come in.
+ * sti() is not enough, we need to
+ * lower priority, too.
+ */
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy0)
+ : "r" (__dummy1));
+}
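
The priority loop at the end of plat_irq_setup() above packs INTC_INTPRI_PPREG (8) 4-bit priorities into each 32-bit priority register and flushes it once per group of eight. A standalone sketch of the same packing, with made-up priority values (not from the patch):

#include <stdio.h>

int main(void)
{
	int prio[8] = { 3, 3, 7, 0, 1, 1, 1, 4 };	/* hypothetical priorities */
	unsigned int data = 0;
	int i;

	for (i = 0; i < 8; i++)
		data |= prio[i] << ((i % 8) * 4);	/* 4 bits per source */

	printf("INTPRI word: 0x%08x\n", data);		/* prints 0x41110733 */
	return 0;
}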
diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c
index 6ac018c15e0..84806b2027f 100644
--- a/arch/sh/kernel/cpu/irq/intc.c
+++ b/arch/sh/kernel/cpu/irq/intc.c
@@ -335,31 +335,6 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
return 0;
}
-static unsigned int __init intc_prio_value(struct intc_desc *desc,
- intc_enum enum_id, int do_grps)
-{
- struct intc_prio *p = desc->priorities;
- unsigned int i;
-
- for (i = 0; p && enum_id && i < desc->nr_priorities; i++) {
- p = desc->priorities + i;
-
- if (p->enum_id != enum_id)
- continue;
-
- return p->priority;
- }
-
- if (do_grps)
- return intc_prio_value(desc, intc_grp_id(desc, enum_id), 0);
-
- /* default to the lowest priority possible if no priority is set
- * - this needs to be at least 2 for 5-bit priorities on 7780
- */
-
- return 2;
-}
-
static unsigned int __init intc_mask_data(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id, int do_grps)
@@ -518,8 +493,10 @@ static void __init intc_register_irq(struct intc_desc *desc,
handle_level_irq, "level");
set_irq_chip_data(irq, (void *)data[primary]);
- /* record the desired priority level */
- intc_prio_level[irq] = intc_prio_value(desc, enum_id, 1);
+ /* set priority level
+ * - this needs to be at least 2 for 5-bit priorities on 7780
+ */
+ intc_prio_level[irq] = 2;
/* enable secondary masking method if present */
if (data[!primary])
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index ee8f1fe84b0..7a26569e795 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -149,6 +149,14 @@ ENTRY(exception_handler)
mov #32,r8
cmp/hs r8,r9
bt trap_entry ! 64 > vec >= 32 is trap
+
+#if defined(CONFIG_SH_FPU)
+ mov #13,r8
+ cmp/eq r8,r9
+ bt 10f ! fpu
+ nop
+#endif
+
mov.l 4f,r8
mov r9,r4
shll2 r9
@@ -158,6 +166,10 @@ ENTRY(exception_handler)
cmp/eq r9,r8
bf 3f
mov.l 8f,r8 ! unhandled exception
+#if defined(CONFIG_SH_FPU)
+10:
+ mov.l 9f, r8 ! unhandled exception
+#endif
3:
mov.l 5f,r10
jmp @r8
@@ -177,7 +189,10 @@ interrupt_entry:
6: .long ret_from_irq
7: .long do_IRQ
8: .long do_exception_error
-
+#ifdef CONFIG_SH_FPU
+9: .long fpu_error_trap_handler
+#endif
+
trap_entry:
mov #0x30,r8
cmp/ge r8,r9 ! vector 0x20-0x2f is systemcall
@@ -250,7 +265,7 @@ ENTRY(sh_bios_handler)
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
-ENTRY(address_error_handler)
+ENTRY(address_error_trap_handler)
mov r15,r4 ! regs
add #4,r4
mov #OFF_PC,r0
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index ec6adc3f306..b230eb278ce 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -65,7 +65,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, groups,
- NULL, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index 965fa2572b2..b279cdc3a23 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -6,4 +6,8 @@ obj-y := common.o probe.o opcode_helper.o
common-y += $(addprefix ../sh2/, ex.o entry.o)
+obj-$(CONFIG_SH_FPU) += fpu.o
+
obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
new file mode 100644
index 00000000000..3feb95a4fcb
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -0,0 +1,89 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+ *
+ * SH7203 support for the clock framework
+ *
+ * Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd)
+ *
+ * Based on clock-sh7263.c
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 8, 12, 16, 0 };
+static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
+#define ifc_divisors pfc_divisors
+
+#if (CONFIG_SH_CLK_MD == 0)
+#define PLL2 (1)
+#elif (CONFIG_SH_CLK_MD == 1)
+#define PLL2 (2)
+#elif (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 3)
+#define PLL2 (4)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2;
+}
+
+static struct clk_ops sh7203_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inw(FREQCR) & 0x0007);
+ clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7203_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inw(FREQCR) & 0x0007);
+ clk->rate = clk->parent->rate / pfc_divisors[idx-2];
+}
+
+static struct clk_ops sh7203_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7203_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7203_clk_ops[] = {
+ &sh7203_master_clk_ops,
+ &sh7203_module_clk_ops,
+ &sh7203_bus_clk_ops,
+ &sh7203_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7203_clk_ops))
+ *ops = sh7203_clk_ops[idx];
+}
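
The ops above derive the whole SH7203 clock tree from FREQCR: the master clock is the input clock times pll1rate[(FREQCR >> 8) & 3] times PLL2 (1 in clock mode 0, per the #if chain), and the module clock divides that by pfc_divisors[FREQCR & 7]. A worked host-side example with assumed values (10 MHz input, clock mode 0, a hypothetical FREQCR):

#include <stdio.h>

static const int pll1rate[] = { 8, 12, 16, 0 };
static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };

int main(void)
{
	unsigned long input = 10000000;		/* 10 MHz crystal, assumed */
	unsigned int freqcr = 0x0103;		/* hypothetical FREQCR value */
	int pll2 = 1;				/* clock mode 0 */

	unsigned long master = input * pll1rate[(freqcr >> 8) & 0x0003] * pll2;
	unsigned long module = master / pfc_divisors[freqcr & 0x0007];

	printf("master %lu Hz, module %lu Hz\n", master, module);
	return 0;
}

With these values the multiplier index is 1 (x12) and the divisor index 3 (/4), so it prints a 120 MHz master clock and a 30 MHz module clock.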
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
new file mode 100644
index 00000000000..ff99562456f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -0,0 +1,633 @@
+/*
+ * Save/restore floating point context for signal handlers.
+ *
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FIXME! These routines can be optimized in big endian case.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+/* The PR (precision) bit in the FP Status Register must be clear when
+ * an frchg instruction is executed, otherwise the instruction is undefined.
+ * Executing frchg with PR set causes a trap on some SH4 implementations.
+ */
+
+#define FPSCR_RCHG 0x00000000
+
+
+/*
+ * Save FPU registers onto task structure.
+ * Assume called with FPU enabled (SR.FD=0).
+ */
+void
+save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+{
+ unsigned long dummy;
+
+ clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+ enable_fpu();
+ asm volatile("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "lds %3, fpscr\n\t"
+ : "=r" (dummy)
+ : "0" ((char *)(&tsk->thread.fpu.hard.status)),
+ "r" (FPSCR_RCHG),
+ "r" (FPSCR_INIT)
+ : "memory");
+
+ disable_fpu();
+ release_fpu(regs);
+}
+
+static void
+restore_fpu(struct task_struct *tsk)
+{
+ unsigned long dummy;
+
+ enable_fpu();
+ asm volatile("fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
+ : "=r" (dummy)
+ : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+ : "memory");
+ disable_fpu();
+}
+
+/*
+ * Load the FPU with signalling NaNs. The bit pattern we use here has
+ * the property that it represents a signalling NaN whether it is
+ * considered as single or as double precision.
+ */
+
+static void
+fpu_init(void)
+{
+ enable_fpu();
+ asm volatile("lds %0, fpul\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
+ "lds %2, fpscr\n\t"
+ : /* no output */
+ : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
+ disable_fpu();
+}
+
+/*
+ * Emulate arithmetic ops on denormalized numbers for some FPU insns.
+ */
+
+/* denormalized float * float */
+static int denormal_mulf(int hx, int hy)
+{
+ unsigned int ix, iy;
+ unsigned long long m, n;
+ int exp, w;
+
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000 || ix == 0)
+ return ((hx ^ hy) & 0x80000000);
+
+ exp = (iy & 0x7f800000) >> 23;
+ ix &= 0x007fffff;
+ iy = (iy & 0x007fffff) | 0x00800000;
+ m = (unsigned long long)ix * iy;
+ n = m;
+ w = -1;
+ while (n) { n >>= 1; w++; }
+
+ /* FIXME: use guard bits */
+ exp += w - 126 - 46;
+ if (exp > 0)
+ ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
+ else if (exp + 22 >= 0)
+ ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
+ else
+ ix = 0;
+
+ ix |= (hx ^ hy) & 0x80000000;
+ return ix;
+}
+
+/* denormalized double * double */
+static void mult64(unsigned long long x, unsigned long long y,
+ unsigned long long *highp, unsigned long long *lowp)
+{
+ unsigned long long sub0, sub1, sub2, sub3;
+ unsigned long long high, low;
+
+ sub0 = (x >> 32) * (unsigned long) (y >> 32);
+ sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
+ sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
+ sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
+ low = sub3;
+ high = 0LL;
+ sub3 += (sub1 << 32);
+ if (low > sub3)
+ high++;
+ low = sub3;
+ sub3 += (sub2 << 32);
+ if (low > sub3)
+ high++;
+ low = sub3;
+ high += (sub1 >> 32) + (sub2 >> 32);
+ high += sub0;
+ *lowp = low;
+ *highp = high;
+}
+
+static inline long long rshift64(unsigned long long mh,
+ unsigned long long ml, int n)
+{
+ if (n >= 64)
+ return mh >> (n - 64);
+ return (mh << (64 - n)) | (ml >> n);
+}
+
+static long long denormal_muld(long long hx, long long hy)
+{
+ unsigned long long ix, iy;
+ unsigned long long mh, ml, nh, nl;
+ int exp, w;
+
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL || ix == 0)
+ return ((hx ^ hy) & 0x8000000000000000LL);
+
+ exp = (iy & 0x7ff0000000000000LL) >> 52;
+ ix &= 0x000fffffffffffffLL;
+ iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ mult64(ix, iy, &mh, &ml);
+ nh = mh;
+ nl = ml;
+ w = -1;
+ if (nh) {
+ while (nh) { nh >>= 1; w++;}
+ w += 64;
+ } else
+ while (nl) { nl >>= 1; w++;}
+
+ /* FIXME: use guard bits */
+ exp += w - 1022 - 52 * 2;
+ if (exp > 0)
+ ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
+ | ((long long)exp << 52);
+ else if (exp + 51 >= 0)
+ ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
+ else
+ ix = 0;
+
+ ix |= (hx ^ hy) & 0x8000000000000000LL;
+ return ix;
+}
+
+/* ix - iy where iy: denormal and ix, iy >= 0 */
+static int denormal_subf1(unsigned int ix, unsigned int iy)
+{
+ int frac;
+ int exp;
+
+ if (ix < 0x00800000)
+ return ix - iy;
+
+ exp = (ix & 0x7f800000) >> 23;
+ if (exp - 1 > 31)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x007fffff) | 0x00800000;
+ frac -= iy;
+ while (frac < 0x00800000) {
+ if (--exp == 0)
+ return frac;
+ frac <<= 1;
+ }
+
+ return (exp << 23) | (frac & 0x007fffff);
+}
+
+/* ix + iy where iy: denormal and ix, iy >= 0 */
+static int denormal_addf1(unsigned int ix, unsigned int iy)
+{
+ int frac;
+ int exp;
+
+ if (ix < 0x00800000)
+ return ix + iy;
+
+ exp = (ix & 0x7f800000) >> 23;
+ if (exp - 1 > 31)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x007fffff) | 0x00800000;
+ frac += iy;
+ if (frac >= 0x01000000) {
+ frac >>= 1;
+ ++exp;
+ }
+
+ return (exp << 23) | (frac & 0x007fffff);
+}
+
+static int denormal_addf(int hx, int hy)
+{
+ unsigned int ix, iy;
+ int sign;
+
+ if ((hx ^ hy) & 0x80000000) {
+ sign = hx & 0x80000000;
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000) {
+ ix = denormal_subf1(ix, iy);
+ if ((int)ix < 0) {
+ ix = -ix;
+ sign ^= 0x80000000;
+ }
+ } else {
+ ix = denormal_subf1(iy, ix);
+ sign ^= 0x80000000;
+ }
+ } else {
+ sign = hx & 0x80000000;
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000)
+ ix = denormal_addf1(ix, iy);
+ else
+ ix = denormal_addf1(iy, ix);
+ }
+
+ return sign | ix;
+}
+
+/* ix - iy where iy: denormal and ix, iy >= 0 */
+static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
+{
+ long long frac;
+ int exp;
+
+ if (ix < 0x0010000000000000LL)
+ return ix - iy;
+
+ exp = (ix & 0x7ff0000000000000LL) >> 52;
+ if (exp - 1 > 63)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ frac -= iy;
+ while (frac < 0x0010000000000000LL) {
+ if (--exp == 0)
+ return frac;
+ frac <<= 1;
+ }
+
+ return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
+}
+
+/* ix + iy where iy: denormal and ix, iy >= 0 */
+static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
+{
+ long long frac;
+ long long exp;
+
+ if (ix < 0x0010000000000000LL)
+ return ix + iy;
+
+ exp = (ix & 0x7ff0000000000000LL) >> 52;
+ if (exp - 1 > 63)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ frac += iy;
+ if (frac >= 0x0020000000000000LL) {
+ frac >>= 1;
+ ++exp;
+ }
+
+ return (exp << 52) | (frac & 0x000fffffffffffffLL);
+}
+
+static long long denormal_addd(long long hx, long long hy)
+{
+ unsigned long long ix, iy;
+ long long sign;
+
+ if ((hx ^ hy) & 0x8000000000000000LL) {
+ sign = hx & 0x8000000000000000LL;
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL) {
+ ix = denormal_subd1(ix, iy);
+ if ((long long)ix < 0) {
+ ix = -ix;
+ sign ^= 0x8000000000000000LL;
+ }
+ } else {
+ ix = denormal_subd1(iy, ix);
+ sign ^= 0x8000000000000000LL;
+ }
+ } else {
+ sign = hx & 0x8000000000000000LL;
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL)
+ ix = denormal_addd1(ix, iy);
+ else
+ ix = denormal_addd1(iy, ix);
+ }
+
+ return sign | ix;
+}
+
+/**
+ * denormal_to_double - Given a denormalized float, store it in
+ * double format
+ *
+ * @fpu: Pointer to sh_fpu_hard structure
+ * @n: Index to FP register
+ */
+static void
+denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
+{
+ unsigned long du, dl;
+ unsigned long x = fpu->fpul;
+ int exp = 1023 - 126;
+
+ if (x != 0 && (x & 0x7f800000) == 0) {
+ du = (x & 0x80000000);
+ while ((x & 0x00800000) == 0) {
+ x <<= 1;
+ exp--;
+ }
+ x &= 0x007fffff;
+ du |= (exp << 20) | (x >> 3);
+ dl = x << 29;
+
+ fpu->fp_regs[n] = du;
+ fpu->fp_regs[n+1] = dl;
+ }
+}
+
+/**
+ * ieee_fpe_handler - Handle denormalized number exception
+ *
+ * @regs: Pointer to register structure
+ *
+ * Returns 1 when it's handled (should not cause exception).
+ */
+static int
+ieee_fpe_handler (struct pt_regs *regs)
+{
+ unsigned short insn = *(unsigned short *) regs->pc;
+ unsigned short finsn;
+ unsigned long nextpc;
+ int nib[4] = {
+ (insn >> 12) & 0xf,
+ (insn >> 8) & 0xf,
+ (insn >> 4) & 0xf,
+ insn & 0xf};
+
+ if (nib[0] == 0xb ||
+ (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
+ regs->pr = regs->pc + 4;
+ if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
+ nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ else
+ nextpc = regs->pc + 4;
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4;
+ else
+ nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x4 && nib[3] == 0xb &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
+ nextpc = regs->regs[nib[1]];
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
+ nextpc = regs->pc + 4 + regs->regs[nib[1]];
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (insn == 0x000b) { /* rts */
+ nextpc = regs->pr;
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else {
+ nextpc = regs->pc + 2;
+ finsn = insn;
+ }
+
+#define FPSCR_FPU_ERROR (1 << 17)
+
+ if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
+ struct task_struct *tsk = current;
+
+ if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+ /* FPU error */
+ denormal_to_double (&tsk->thread.fpu.hard,
+ (finsn >> 8) & 0xf);
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & (1 << 19);
+
+ if ((fpscr & FPSCR_FPU_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal */
+ llx = ((long long) hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n+1];
+ lly = ((long long) hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m+1];
+ if ((hx & 0x7fffffff) >= 0x00100000)
+ llx = denormal_muld(lly, llx);
+ else
+ llx = denormal_muld(llx, lly);
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_FPU_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal */
+ if ((hx & 0x7fffffff) >= 0x00800000)
+ hx = denormal_mulf(hy, hx);
+ else
+ hx = denormal_mulf(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & (1 << 19);
+
+ if ((fpscr & FPSCR_FPU_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal */
+ llx = ((long long) hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n+1];
+ lly = ((long long) hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m+1];
+ if ((finsn & 0xf00f) == 0xf000)
+ llx = denormal_addd(llx, lly);
+ else
+ llx = denormal_addd(llx, lly ^ (1LL << 63));
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_FPU_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal */
+ if ((finsn & 0xf00f) == 0xf000)
+ hx = denormal_addf(hx, hy);
+ else
+ hx = denormal_addf(hx, hy ^ 0x80000000);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ }
+
+ return 0;
+}
+
+BUILD_TRAP_HANDLER(fpu_error)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
+
+ save_fpu(tsk, regs);
+ if (ieee_fpe_handler(regs)) {
+ tsk->thread.fpu.hard.fpscr &=
+ ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
+ grab_fpu(regs);
+ restore_fpu(tsk);
+ set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ return;
+ }
+
+ force_sig(SIGFPE, tsk);
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
+
+ grab_fpu(regs);
+ if (!user_mode(regs)) {
+ printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ return;
+ }
+
+ if (used_math()) {
+ /* Using the FPU again. */
+ restore_fpu(tsk);
+ } else {
+ /* First time FPU user. */
+ fpu_init();
+ set_used_math();
+ }
+ set_tsk_thread_flag(tsk, TIF_USEDFPU);
+}
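
mult64() above builds a full 64x64 -> 128-bit product out of four 32x32 partial products with manual carry handling, since the emulation cannot assume a wider integer type. On a host compiler that has __int128, the same product can be cross-checked directly; a small sketch (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long x = 0x000fffffffffffffULL;	/* example mantissa */
	unsigned long long y = 0x0010000000000001ULL;	/* example mantissa */
	unsigned __int128 p = (unsigned __int128)x * y;

	/* mult64(x, y, &high, &low) should agree with these two halves */
	printf("high %016llx low %016llx\n",
	       (unsigned long long)(p >> 64), (unsigned long long)p);
	return 0;
}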
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 6d02465704b..6910e266446 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -3,25 +3,36 @@
*
* CPU Subtype Probing for SH-2A.
*
- * Copyright (C) 2004, 2005 Paul Mundt
+ * Copyright (C) 2004 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
int __init detect_cpu_and_cache_system(void)
{
- /* Just SH7206 for now .. */
- boot_cpu_data.type = CPU_SH7206;
+ /* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
boot_cpu_data.flags |= CPU_HAS_OP32;
+#if defined(CONFIG_CPU_SUBTYPE_SH7203)
+ boot_cpu_data.type = CPU_SH7203;
+ /* SH7203 has an FPU.. */
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
+ boot_cpu_data.type = CPU_SH7263;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+ boot_cpu_data.type = CPU_SH7206;
+ /* While SH7206 has a DSP.. */
+ boot_cpu_data.flags |= CPU_HAS_DSP;
+#endif
+
boot_cpu_data.dcache.ways = 4;
- boot_cpu_data.dcache.way_incr = (1 << 11);
+ boot_cpu_data.dcache.way_incr = (1 << 11);
boot_cpu_data.dcache.sets = 128;
boot_cpu_data.dcache.entry_shift = 4;
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
@@ -37,4 +48,3 @@ int __init detect_cpu_and_cache_system(void)
return 0;
}
-
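
For reference, the geometry probed above works out as follows (assuming 16-byte lines, which the entry_shift of 4 implies): each D-cache way holds sets * linesz = 128 * 16 = 2048 bytes, so the 4-way cache totals 8KB, and way_incr = 1 << 11 matches the 2KB way size.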
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
new file mode 100644
index 00000000000..db6ef5cecde
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -0,0 +1,319 @@
+/*
+ * SH7203 and SH7263 Setup
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+ DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI,
+ DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI,
+ DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI,
+ DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI,
+ USB, LCDC, CMT0, CMT1, BSC, WDT,
+ MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D,
+ MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F,
+ MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U,
+ MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U,
+ MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V,
+ MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V,
+ ADC_ADI,
+ IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI,
+ IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI,
+ IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI,
+ IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, IIC33_TEI,
+ SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
+ SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
+ SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
+ SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
+ SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI,
+ SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI,
+ SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
+
+ /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
+ ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF,
+ ROMDEC_IREADY,
+
+ FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+
+ SDHI3, SDHI0, SDHI1,
+
+ RTC_ARM, RTC_PRD, RTC_CUP,
+ RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, RCAN0_SLE,
+ RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, RCAN1_SLE,
+
+ SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI,
+
+ /* interrupt groups */
+ PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU4_ABCD,
+ IIC30, IIC31, IIC32, IIC33, SCIF0, SCIF1, SCIF2, SCIF3,
+ SSU0, SSU1, ROMDEC, SDHI, FLCTL, RTC, RCAN0, RCAN1, SRC
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+ INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109),
+ INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113),
+ INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117),
+ INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121),
+ INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125),
+ INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129),
+ INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133),
+ INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137),
+ INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
+ INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
+ INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
+ INTC_IRQ(MTU2_TGI0A, 146), INTC_IRQ(MTU2_TGI0B, 147),
+ INTC_IRQ(MTU2_TGI0C, 148), INTC_IRQ(MTU2_TGI0D, 149),
+ INTC_IRQ(MTU2_TCI0V, 150),
+ INTC_IRQ(MTU2_TGI0E, 151), INTC_IRQ(MTU2_TGI0F, 152),
+ INTC_IRQ(MTU2_TGI1A, 153), INTC_IRQ(MTU2_TGI1B, 154),
+ INTC_IRQ(MTU2_TCI1V, 155), INTC_IRQ(MTU2_TCI1U, 156),
+ INTC_IRQ(MTU2_TGI2A, 157), INTC_IRQ(MTU2_TGI2B, 158),
+ INTC_IRQ(MTU2_TCI2V, 159), INTC_IRQ(MTU2_TCI2U, 160),
+ INTC_IRQ(MTU2_TGI3A, 161), INTC_IRQ(MTU2_TGI3B, 162),
+ INTC_IRQ(MTU2_TGI3C, 163), INTC_IRQ(MTU2_TGI3D, 164),
+ INTC_IRQ(MTU2_TCI3V, 165),
+ INTC_IRQ(MTU2_TGI4A, 166), INTC_IRQ(MTU2_TGI4B, 167),
+ INTC_IRQ(MTU2_TGI4C, 168), INTC_IRQ(MTU2_TGI4D, 169),
+ INTC_IRQ(MTU2_TCI4V, 170),
+ INTC_IRQ(ADC_ADI, 171),
+ INTC_IRQ(IIC30_STPI, 172), INTC_IRQ(IIC30_NAKI, 173),
+ INTC_IRQ(IIC30_RXI, 174), INTC_IRQ(IIC30_TXI, 175),
+ INTC_IRQ(IIC30_TEI, 176),
+ INTC_IRQ(IIC31_STPI, 177), INTC_IRQ(IIC31_NAKI, 178),
+ INTC_IRQ(IIC31_RXI, 179), INTC_IRQ(IIC31_TXI, 180),
+ INTC_IRQ(IIC31_TEI, 181),
+ INTC_IRQ(IIC32_STPI, 182), INTC_IRQ(IIC32_NAKI, 183),
+ INTC_IRQ(IIC32_RXI, 184), INTC_IRQ(IIC32_TXI, 185),
+ INTC_IRQ(IIC32_TEI, 186),
+ INTC_IRQ(IIC33_STPI, 187), INTC_IRQ(IIC33_NAKI, 188),
+ INTC_IRQ(IIC33_RXI, 189), INTC_IRQ(IIC33_TXI, 190),
+ INTC_IRQ(IIC33_TEI, 191),
+ INTC_IRQ(SCIF0_BRI, 192), INTC_IRQ(SCIF0_ERI, 193),
+ INTC_IRQ(SCIF0_RXI, 194), INTC_IRQ(SCIF0_TXI, 195),
+ INTC_IRQ(SCIF1_BRI, 196), INTC_IRQ(SCIF1_ERI, 197),
+ INTC_IRQ(SCIF1_RXI, 198), INTC_IRQ(SCIF1_TXI, 199),
+ INTC_IRQ(SCIF2_BRI, 200), INTC_IRQ(SCIF2_ERI, 201),
+ INTC_IRQ(SCIF2_RXI, 202), INTC_IRQ(SCIF2_TXI, 203),
+ INTC_IRQ(SCIF3_BRI, 204), INTC_IRQ(SCIF3_ERI, 205),
+ INTC_IRQ(SCIF3_RXI, 206), INTC_IRQ(SCIF3_TXI, 207),
+ INTC_IRQ(SSU0_SSERI, 208), INTC_IRQ(SSU0_SSRXI, 209),
+ INTC_IRQ(SSU0_SSTXI, 210),
+ INTC_IRQ(SSU1_SSERI, 211), INTC_IRQ(SSU1_SSRXI, 212),
+ INTC_IRQ(SSU1_SSTXI, 213),
+ INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
+ INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
+ INTC_IRQ(FLCTL_FLSTEI, 224), INTC_IRQ(FLCTL_FLTENDI, 225),
+ INTC_IRQ(FLCTL_FLTREQ0I, 226), INTC_IRQ(FLCTL_FLTREQ1I, 227),
+ INTC_IRQ(RTC_ARM, 231), INTC_IRQ(RTC_PRD, 232),
+ INTC_IRQ(RTC_CUP, 233),
+ INTC_IRQ(RCAN0_ERS, 234), INTC_IRQ(RCAN0_OVR, 235),
+ INTC_IRQ(RCAN0_RM0, 236), INTC_IRQ(RCAN0_RM1, 237),
+ INTC_IRQ(RCAN0_SLE, 238),
+ INTC_IRQ(RCAN1_ERS, 239), INTC_IRQ(RCAN1_OVR, 240),
+ INTC_IRQ(RCAN1_RM0, 241), INTC_IRQ(RCAN1_RM1, 242),
+ INTC_IRQ(RCAN1_SLE, 243),
+
+ /* SH7263-specific trash */
+#ifdef CONFIG_CPU_SUBTYPE_SH7263
+ INTC_IRQ(ROMDEC_ISY, 218), INTC_IRQ(ROMDEC_IERR, 219),
+ INTC_IRQ(ROMDEC_IARG, 220), INTC_IRQ(ROMDEC_ISEC, 221),
+ INTC_IRQ(ROMDEC_IBUF, 222), INTC_IRQ(ROMDEC_IREADY, 223),
+
+ INTC_IRQ(SDHI3, 228), INTC_IRQ(SDHI0, 229), INTC_IRQ(SDHI1, 230),
+
+ INTC_IRQ(SRC_OVF, 244), INTC_IRQ(SRC_ODFI, 245),
+ INTC_IRQ(SRC_IDEI, 246),
+
+ INTC_IRQ(IEBI, 247),
+#endif
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+ INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI),
+ INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI),
+ INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI),
+ INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI),
+ INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI),
+ INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI),
+ INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI),
+ INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI),
+ INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D),
+ INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F),
+ INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B),
+ INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U),
+ INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B),
+ INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U),
+ INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D),
+ INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D),
+ INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI,
+ IIC30_TEI),
+ INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI,
+ IIC31_TEI),
+ INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI,
+ IIC32_TEI),
+ INTC_GROUP(IIC33, IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI,
+ IIC33_TEI),
+ INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
+ INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
+ INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
+ INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
+ INTC_GROUP(SSU0, SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI),
+ INTC_GROUP(SSU1, SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI),
+ INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I,
+ FLCTL_FLTREQ1I),
+ INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP),
+ INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1,
+ RCAN0_SLE),
+ INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1,
+ RCAN1_SLE),
+
+#ifdef CONFIG_CPU_SUBTYPE_SH7263
+ INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG,
+ ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY),
+ INTC_GROUP(SDHI, SDHI3, SDHI0, SDHI1),
+ INTC_GROUP(SRC, SRC_OVF, SRC_ODFI, SRC_IDEI),
+#endif
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
+ { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
+ { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
+ { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
+ { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
+ { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
+ MTU2_VU } },
+ { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
+ MTU2_TCI4V } },
+ { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
+ { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
+ { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
+#ifdef CONFIG_CPU_SUBTYPE_SH7203
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
+ SSI3_SSII, 0 } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
+#else
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
+ SSI3_SSII, ROMDEC } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
+#endif
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe0808, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 193, 194, 195, 192 },
+ }, {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 197, 198, 199, 196 },
+ }, {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 201, 202, 203, 200 },
+ }, {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 205, 206, 207, 204 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffff2000,
+ .end = 0xffff2000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 232,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 233,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 231,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct platform_device *sh7203_devices[] __initdata = {
+ &sci_device,
+ &rtc_device,
+};
+
+static int __init sh7203_devices_setup(void)
+{
+ return platform_add_devices(sh7203_devices,
+ ARRAY_SIZE(sh7203_devices));
+}
+__initcall(sh7203_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
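
Once plat_irq_setup() has registered this controller, drivers claim the vectors above by plain IRQ number. A hypothetical consumer of SCIF0_RXI (vector/IRQ 194), sketched against the request_irq() interface of this kernel generation (all names here are made up):

#include <linux/init.h>
#include <linux/interrupt.h>

static irqreturn_t my_scif0_rx(int irq, void *dev_id)
{
	/* drain the receive FIFO here */
	return IRQ_HANDLED;
}

static int __init my_driver_init(void)
{
	return request_irq(194, my_scif0_rx, 0, "my-scif0-rx", NULL);
}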
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index bd745aa8722..a564425b905 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -167,7 +167,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
- NULL, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 646eb693361..3ae4d9111f1 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH3) := clock-sh3.o
@@ -21,5 +22,6 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7706) := clock-sh7706.o
clock-$(CONFIG_CPU_SUBTYPE_SH7709) := clock-sh7709.o
clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o
clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o
obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
new file mode 100644
index 00000000000..54f54df51ef
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -0,0 +1,71 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7712.c
+ *
+ * SH7712 support for the clock framework
+ *
+ * Copyright (C) 2007 Andrew Murray <amurray@mpc-data.co.uk>
+ *
+ * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int multipliers[] = { 1, 2, 3 };
+static int divisors[] = { 1, 2, 3, 4, 6 };
+
+static void master_clk_init(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = (frqcr & 0x0300) >> 8;
+
+ clk->rate *= multipliers[idx];
+}
+
+static struct clk_ops sh7712_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = frqcr & 0x0007;
+
+ clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops sh7712_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = (frqcr & 0x0030) >> 4;
+
+ clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops sh7712_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7712_clk_ops[] = {
+ &sh7712_master_clk_ops,
+ &sh7712_module_clk_ops,
+ &sh7712_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7712_clk_ops))
+ *ops = sh7712_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 0d12a124055..4004073f98c 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -13,8 +13,9 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/page.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -409,6 +410,27 @@ ENTRY(handle_exception)
! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
! save all registers onto stack.
!
+
+#ifdef CONFIG_GUSA
+ ! Check for roll back gRB (User and Kernel)
+ mov r15, k0
+ shll k0
+ bf/s 1f
+ shll k0
+ bf/s 1f
+ stc spc, k1
+ stc r0_bank, k0
+ cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0)
+ bt/s 2f
+ stc r1_bank, k1
+
+ add #-2, k0
+ add r15, k0
+ ldc k0, spc ! PC = saved r0 + r15 - 2
+2: mov k1, r15 ! SP = r1
+1:
+#endif
+
stc ssr, k0 ! Is it from kernel space?
shll k0 ! Check MD bit (bit30) by shifting it into...
shll k0 ! ...the T bit
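
The CONFIG_GUSA block implements gRB rollback on exception entry: a userspace atomic (gUSA) sequence keeps a small negative value in r15 while it runs, so its top two bits are set, and r0 points just past the end of the sequence. If the exception arrived before the sequence finished, the saved PC is rewound so the whole sequence restarts. A loose C rendition of the assembly above (register names kept purely for illustration):

    if ((r15 & 0xc0000000) == 0xc0000000) {   /* inside a gUSA sequence */
            if (spc < r0)                     /* stopped before the end, */
                    spc = r0 + (long)r15 - 2; /* so restart from the top */
            r15 = r1;                 /* r1 holds the real stack pointer */
    }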
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index b6abf38d3a8..11b6d9c6eda 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -36,7 +36,7 @@ ENTRY(exception_handling_table)
.long exception_error ! address error store /* 100 */
#endif
#if defined(CONFIG_SH_FPU)
- .long do_fpu_error /* 120 */
+ .long fpu_error_trap_handler /* 120 */
#else
.long exception_error /* 120 */
#endif
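
The vector table now points at fpu_error_trap_handler because the C handler (in the fpu.c hunks later in this patch) is declared through BUILD_TRAP_HANDLER(fpu_error) rather than an open-coded do_fpu_error(). Judging from those call sites, the helper macros presumably expand along these lines:

    /* assumed shape, inferred from the fpu.c usage below */
    #define BUILD_TRAP_HANDLER(name)                                        \
    asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
                                        unsigned long r6, unsigned long r7, \
                                        struct pt_regs __regs)

    #define TRAP_HANDLER_DECL \
            struct pt_regs *regs = RELOC_HIDE(&__regs, 0)

so BUILD_TRAP_HANDLER(fpu_error) defines the fpu_error_trap_handler symbol referenced above.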
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf579e061e0..fcc80bb7bee 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,11 +16,11 @@
#include <asm/cache.h>
#include <asm/io.h>
-int __init detect_cpu_and_cache_system(void)
+int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
- jump_to_P2();
+ jump_to_uncached();
/*
* Check if the entry shadows or not.
* When shadowed, it's 128-entry system.
@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
ctrl_outl(data0&~SH_CACHE_VALID, addr0);
ctrl_outl(data2&~SH_CACHE_VALID, addr1);
- back_to_P1();
+ back_to_cached();
boot_cpu_data.dcache.ways = 4;
boot_cpu_data.dcache.entry_shift = 4;
@@ -84,6 +84,9 @@ int __init detect_cpu_and_cache_system(void)
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
boot_cpu_data.type = CPU_SH7720;
#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7721)
+ boot_cpu_data.type = CPU_SH7721;
+#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
boot_cpu_data.type = CPU_SH7705;
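
jump_to_P2()/back_to_P1() were named after the SH-3/4 fixed P2 (uncached) and P1 (cached) mappings; with SH-5 merged into the same tree, the calls become the mapping-neutral jump_to_uncached()/back_to_cached(), and __uses_jump_to_uncached marks functions that make the jump, presumably so they are linked where executing uncached is safe. The calling pattern for cache-probing code is unchanged:

    jump_to_uncached();     /* run from the uncached mapping so the     */
                            /* cache address arrays can be poked safely */
    /* ... ctrl_inl()/ctrl_outl() on the cache arrays ... */
    back_to_cached();       /* resume execution through the cache */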
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index f6c65f2659e..dd0a20a685f 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -66,12 +66,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(DMAC, 7),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF0, 3),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
@@ -85,7 +79,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
@@ -93,7 +87,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 60b04b1f945..969804bb523 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -81,13 +81,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(DMAC, 7),
- INTC_PRIO(SCI, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF0, 3),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
@@ -109,7 +102,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
@@ -120,7 +113,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#endif
static struct resource rtc_resources[] = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 84e5629fa84..0cc0e2bf135 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -73,18 +73,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(DMAC1, 7),
- INTC_PRIO(DMAC2, 7),
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SIOF0, 3),
- INTC_PRIO(SIOF1, 3),
- INTC_PRIO(EDMAC0, 5),
- INTC_PRIO(EDMAC1, 5),
- INTC_PRIO(EDMAC2, 5),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
@@ -101,7 +89,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
@@ -109,7 +97,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct resource rtc_resources[] = {
[0] = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index a0929b8a95a..3855ea4c21c 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -85,9 +85,62 @@ static struct platform_device sci_device = {
},
};
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xA4428000,
+ .end = 0xA44280FF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 67,
+ .end = 67,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 usb_ohci_dma_mask = 0xffffffffUL;
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct resource usbf_resources[] = {
+ [0] = {
+ .name = "sh_udc",
+ .start = 0xA4420000,
+ .end = 0xA44200FF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "sh_udc",
+ .start = 65,
+ .end = 65,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbf_device = {
+ .name = "sh_udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usbf_resources),
+ .resource = usbf_resources,
+};
+
static struct platform_device *sh7720_devices[] __initdata = {
&rtc_device,
&sci_device,
+ &usb_ohci_device,
+ &usbf_device,
};
static int __init sh7720_devices_setup(void)
@@ -127,8 +180,11 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1_DEI0, 0x800),
INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840),
INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900),
- INTC_VECT(SSL, 0x980), INTC_VECT(USBFI0, 0xa20),
- INTC_VECT(USBFI1, 0xa40), INTC_VECT(USBHI, 0xa60),
+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+ INTC_VECT(SSL, 0x980),
+#endif
+ INTC_VECT(USBFI0, 0xa20), INTC_VECT(USBFI1, 0xa40),
+ INTC_VECT(USBHI, 0xa60),
INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0),
INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00),
INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80),
@@ -153,22 +209,16 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 2),
- INTC_PRIO(SCIF1, 2),
- INTC_PRIO(DMAC1, 1),
- INTC_PRIO(DMAC2, 1),
- INTC_PRIO(RTC, 2),
- INTC_PRIO(TMU, 2),
- INTC_PRIO(TPU, 2),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
{ 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
+#else
+ { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, 0 } },
+#endif
{ 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
{ 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
{ 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
@@ -177,7 +227,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct intc_sense_reg sense_registers[] __initdata = {
{ INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
@@ -190,7 +240,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq,
- NULL, priorities, NULL, prio_registers, sense_registers);
+ NULL, NULL, prio_registers, sense_registers);
void __init plat_irq_setup_pins(int mode)
{
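
The new resource arrays register the SH7720's on-chip OHCI host controller and UDC function controller as plain platform devices: one memory window and one IRQ each, with the OHCI device additionally advertising a 32-bit DMA mask. Hooking them up is the standard pattern, roughly what the elided sh7720_devices_setup() body does:

    static int __init sh7720_devices_setup(void)
    {
            return platform_add_devices(sh7720_devices,
                                        ARRAY_SIZE(sh7720_devices));
    }
    __initcall(sh7720_devices_setup);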
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index dadd6bffc12..d608557c7a3 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -5,7 +5,7 @@
obj-y := probe.o common.o
common-y += $(addprefix ../sh3/, entry.o ex.o)
-obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o
obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
# CPU subtype setup
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index c5a4fc77fa0..817f9939cda 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -1,7 +1,4 @@
-/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
- *
- * linux/arch/sh/kernel/fpu.c
- *
+/*
* Save/restore floating point context for signal handlers.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,15 +6,16 @@
* for more details.
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support)
*
- * FIXME! These routines can be optimized in big endian case.
+ * FIXME! These routines have not been tested in the big endian case.
*/
-
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/io.h>
+#include <asm/cpu/fpu.h>
#include <asm/processor.h>
#include <asm/system.h>
-#include <asm/io.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
@@ -25,177 +23,184 @@
*/
#define FPSCR_RCHG 0x00000000
+extern unsigned long long float64_div(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_mul(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_add(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_sub(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
+static unsigned int fpu_exception_flags;
/*
* Save FPU registers onto task structure.
* Assume called with FPU enabled (SR.FD=0).
*/
-void
-save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
unsigned long dummy;
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu();
- asm volatile("sts.l fpul, @-%0\n\t"
- "sts.l fpscr, @-%0\n\t"
- "lds %2, fpscr\n\t"
- "frchg\n\t"
- "fmov.s fr15, @-%0\n\t"
- "fmov.s fr14, @-%0\n\t"
- "fmov.s fr13, @-%0\n\t"
- "fmov.s fr12, @-%0\n\t"
- "fmov.s fr11, @-%0\n\t"
- "fmov.s fr10, @-%0\n\t"
- "fmov.s fr9, @-%0\n\t"
- "fmov.s fr8, @-%0\n\t"
- "fmov.s fr7, @-%0\n\t"
- "fmov.s fr6, @-%0\n\t"
- "fmov.s fr5, @-%0\n\t"
- "fmov.s fr4, @-%0\n\t"
- "fmov.s fr3, @-%0\n\t"
- "fmov.s fr2, @-%0\n\t"
- "fmov.s fr1, @-%0\n\t"
- "fmov.s fr0, @-%0\n\t"
- "frchg\n\t"
- "fmov.s fr15, @-%0\n\t"
- "fmov.s fr14, @-%0\n\t"
- "fmov.s fr13, @-%0\n\t"
- "fmov.s fr12, @-%0\n\t"
- "fmov.s fr11, @-%0\n\t"
- "fmov.s fr10, @-%0\n\t"
- "fmov.s fr9, @-%0\n\t"
- "fmov.s fr8, @-%0\n\t"
- "fmov.s fr7, @-%0\n\t"
- "fmov.s fr6, @-%0\n\t"
- "fmov.s fr5, @-%0\n\t"
- "fmov.s fr4, @-%0\n\t"
- "fmov.s fr3, @-%0\n\t"
- "fmov.s fr2, @-%0\n\t"
- "fmov.s fr1, @-%0\n\t"
- "fmov.s fr0, @-%0\n\t"
- "lds %3, fpscr\n\t"
- : "=r" (dummy)
- : "0" ((char *)(&tsk->thread.fpu.hard.status)),
- "r" (FPSCR_RCHG),
- "r" (FPSCR_INIT)
- : "memory");
-
- disable_fpu();
- release_fpu(regs);
+ asm volatile ("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "lds %2, fpscr\n\t"
+ "frchg\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "frchg\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "lds %3, fpscr\n\t":"=r" (dummy)
+ :"0"((char *)(&tsk->thread.fpu.hard.status)),
+ "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
+ :"memory");
+
+ disable_fpu();
+ release_fpu(regs);
}
-static void
-restore_fpu(struct task_struct *tsk)
+static void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
- enable_fpu();
- asm volatile("lds %2, fpscr\n\t"
- "fmov.s @%0+, fr0\n\t"
- "fmov.s @%0+, fr1\n\t"
- "fmov.s @%0+, fr2\n\t"
- "fmov.s @%0+, fr3\n\t"
- "fmov.s @%0+, fr4\n\t"
- "fmov.s @%0+, fr5\n\t"
- "fmov.s @%0+, fr6\n\t"
- "fmov.s @%0+, fr7\n\t"
- "fmov.s @%0+, fr8\n\t"
- "fmov.s @%0+, fr9\n\t"
- "fmov.s @%0+, fr10\n\t"
- "fmov.s @%0+, fr11\n\t"
- "fmov.s @%0+, fr12\n\t"
- "fmov.s @%0+, fr13\n\t"
- "fmov.s @%0+, fr14\n\t"
- "fmov.s @%0+, fr15\n\t"
- "frchg\n\t"
- "fmov.s @%0+, fr0\n\t"
- "fmov.s @%0+, fr1\n\t"
- "fmov.s @%0+, fr2\n\t"
- "fmov.s @%0+, fr3\n\t"
- "fmov.s @%0+, fr4\n\t"
- "fmov.s @%0+, fr5\n\t"
- "fmov.s @%0+, fr6\n\t"
- "fmov.s @%0+, fr7\n\t"
- "fmov.s @%0+, fr8\n\t"
- "fmov.s @%0+, fr9\n\t"
- "fmov.s @%0+, fr10\n\t"
- "fmov.s @%0+, fr11\n\t"
- "fmov.s @%0+, fr12\n\t"
- "fmov.s @%0+, fr13\n\t"
- "fmov.s @%0+, fr14\n\t"
- "fmov.s @%0+, fr15\n\t"
- "frchg\n\t"
- "lds.l @%0+, fpscr\n\t"
- "lds.l @%0+, fpul\n\t"
- : "=r" (dummy)
- : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
- : "memory");
+ enable_fpu();
+ asm volatile ("lds %2, fpscr\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "frchg\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "frchg\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
+ :"=r" (dummy)
+ :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
+ :"memory");
disable_fpu();
}
/*
* Load the FPU with signalling NANS. This bit pattern we're using
 * has the property that no matter whether considered as single or as
- * double precision represents signaling NANS.
+ * double precision represents signaling NANS.
*/
-static void
-fpu_init(void)
+static void fpu_init(void)
{
enable_fpu();
- asm volatile("lds %0, fpul\n\t"
- "lds %1, fpscr\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "lds %2, fpscr\n\t"
- : /* no output */
- : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
- disable_fpu();
+ asm volatile ( "lds %0, fpul\n\t"
+ "lds %1, fpscr\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
+ "frchg\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
+ "frchg\n\t"
+ "lds %2, fpscr\n\t"
+ : /* no output */
+ :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
+ disable_fpu();
}
/**
- * denormal_to_double - Given denormalized float number,
- * store double float
+ * denormal_to_double - Given denormalized float number,
+ * store double float
*
- * @fpu: Pointer to sh_fpu_hard structure
- * @n: Index to FP register
+ * @fpu: Pointer to sh_fpu_hard structure
+ * @n: Index to FP register
*/
-static void
-denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
+static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
@@ -212,7 +217,7 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
dl = x << 29;
fpu->fp_regs[n] = du;
- fpu->fp_regs[n+1] = dl;
+ fpu->fp_regs[n + 1] = dl;
}
}
@@ -223,68 +228,191 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
*
* Returns 1 when it's handled (should not cause exception).
*/
-static int
-ieee_fpe_handler (struct pt_regs *regs)
+static int ieee_fpe_handler(struct pt_regs *regs)
{
- unsigned short insn = *(unsigned short *) regs->pc;
+ unsigned short insn = *(unsigned short *)regs->pc;
unsigned short finsn;
unsigned long nextpc;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
- insn & 0xf};
-
- if (nib[0] == 0xb ||
- (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
- regs->pr = regs->pc + 4;
-
- if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
- nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
- finsn = *(unsigned short *) (regs->pc + 2);
- } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
+ insn & 0xf
+ };
+
+ if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
+ regs->pr = regs->pc + 4; /* bsr & jsr */
+
+ if (nib[0] == 0xa || nib[0] == 0xb) {
+ /* bra & bsr */
+ nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xd) {
+ /* bt/s */
if (regs->sr & 1)
- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
- finsn = *(unsigned short *) (regs->pc + 2);
- } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xf) {
+ /* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
- finsn = *(unsigned short *) (regs->pc + 2);
+ nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
+ finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
- (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
+ (nib[2] == 0x0 || nib[2] == 0x2)) {
+ /* jmp & jsr */
nextpc = regs->regs[nib[1]];
- finsn = *(unsigned short *) (regs->pc + 2);
+ finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
- (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
+ (nib[2] == 0x0 || nib[2] == 0x2)) {
+ /* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
- finsn = *(unsigned short *) (regs->pc + 2);
- } else if (insn == 0x000b) { /* rts */
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (insn == 0x000b) {
+ /* rts */
nextpc = regs->pr;
- finsn = *(unsigned short *) (regs->pc + 2);
+ finsn = *(unsigned short *)(regs->pc + 2);
} else {
nextpc = regs->pc + instruction_size(insn);
finsn = insn;
}
- if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
+ if ((finsn & 0xf1ff) == 0xf0ad) {
+ /* fcnvsd */
struct task_struct *tsk = current;
save_fpu(tsk, regs);
- if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
+ if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
/* FPU error */
- denormal_to_double (&tsk->thread.fpu.hard,
- (finsn >> 8) & 0xf);
- tsk->thread.fpu.hard.fpscr &=
- ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
- grab_fpu(regs);
- restore_fpu(tsk);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ denormal_to_double(&tsk->thread.fpu.hard,
+ (finsn >> 8) & 0xf);
+ else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00f) == 0xf002) {
+ /* fmul */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m + 1];
+ llx = float64_mul(llx, lly);
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ hx = float32_mul(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00e) == 0xf000) {
+ /* fadd, fsub */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m + 1];
+ if ((finsn & 0xf00f) == 0xf000)
+ llx = float64_add(llx, lly);
+ else
+ llx = float64_sub(llx, lly);
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ if ((finsn & 0xf00f) == 0xf000)
+ hx = float32_add(hx, hy);
+ else
+ hx = float32_sub(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf003) == 0xf003) {
+ /* fdiv */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m + 1];
+
+ llx = float64_div(llx, lly);
+
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ hx = float32_div(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
} else
- force_sig(SIGFPE, tsk);
+ return 0;
regs->pc = nextpc;
return 1;
@@ -293,27 +421,48 @@ ieee_fpe_handler (struct pt_regs *regs)
return 0;
}
-asmlinkage void
-do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
- unsigned long r7, struct pt_regs __regs)
+void float_raise(unsigned int flags)
+{
+ fpu_exception_flags |= flags;
+}
+
+int float_rounding_mode(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
struct task_struct *tsk = current;
+ int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
+ return roundingMode;
+}
- if (ieee_fpe_handler(regs))
- return;
+BUILD_TRAP_HANDLER(fpu_error)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
- regs->pc += 2;
save_fpu(tsk, regs);
+ fpu_exception_flags = 0;
+ if (ieee_fpe_handler(regs)) {
+ tsk->thread.fpu.hard.fpscr &=
+ ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
+ tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
+ /* Set the FPSCR flag as well as cause bits - simply
+ * replicate the cause */
+ tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
+ grab_fpu(regs);
+ restore_fpu(tsk);
+ set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+ (fpu_exception_flags >> 2)) == 0) {
+ return;
+ }
+ }
+
force_sig(SIGFPE, tsk);
}
-asmlinkage void
-do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
- unsigned long r7, struct pt_regs __regs)
+BUILD_TRAP_HANDLER(fpu_state_restore)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
grab_fpu(regs);
if (!user_mode(regs)) {
@@ -324,7 +473,7 @@ do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
if (used_math()) {
/* Using the FPU again. */
restore_fpu(tsk);
- } else {
+ } else {
/* First time FPU user. */
fpu_init();
set_used_math();
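
Taken together, the fpu.c rework turns the FPU-error trap into denormal emulation: the handler saves the FPU context, ieee_fpe_handler() decodes the faulting instruction (chasing branch delay slots first), and the fmul/fadd/fsub/fdiv/fcnvsd cases redo the operation through the softfloat routines added below. Exceptions are collected by float_raise() in cause-bit positions; since FPSCR keeps parallel cause (bits 17:12), enable (11:7) and flag (6:2) fields, plain shifts convert between them, e.g. (layout as assumed here):

    unsigned int cause_inexact = 1 << 12;             /* cause field        */
    unsigned int flag_inexact  = cause_inexact >> 10; /* 1 << 2, flag field */

which is the fpu_exception_flags >> 10 step in the handler. SIGFPE is raised only when emulation fails or a raised exception is also enabled.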
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index bc9c28a69bf..f2b9238cda0 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -98,6 +98,8 @@ int __init detect_cpu_and_cache_system(void)
case 0x200A:
if (prr == 0x61)
boot_cpu_data.type = CPU_SH7781;
+ else if (prr == 0xa1)
+ boot_cpu_data.type = CPU_SH7763;
else
boot_cpu_data.type = CPU_SH7780;
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 523f68a9ce0..ae3603aca61 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -126,12 +126,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(REF, REF_RCMI, REF_ROVI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF, 3),
- INTC_PRIO(SCI1, 3),
- INTC_PRIO(DMAC, 7),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
@@ -143,7 +137,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
@@ -163,7 +157,7 @@ static struct intc_group groups_dma4[] __initdata = {
static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
vectors_dma4, groups_dma4,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#endif
/* SH7750R and SH7751R both have 8-channel DMA controllers */
@@ -184,7 +178,7 @@ static struct intc_group groups_dma8[] __initdata = {
static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
vectors_dma8, groups_dma8,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#endif
/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
@@ -205,7 +199,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
- vectors_tmu34, NULL, priorities,
+ vectors_tmu34, NULL,
mask_registers, prio_registers, NULL);
#endif
@@ -216,7 +210,7 @@ static struct intc_vect vectors_irlm[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
/* SH7751 and SH7751R both have PCI */
#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
@@ -233,7 +227,7 @@ static struct intc_group groups_pci[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 7a898cb1d94..85f81579b97 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -92,15 +92,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(REF, REF_RCMI, REF_ROVI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SIM, 3),
- INTC_PRIO(DMAC, 7),
- INTC_PRIO(DMABRG, 13),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
{ IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
@@ -132,7 +123,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
@@ -140,7 +131,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
new file mode 100644
index 00000000000..7b2d337ee41
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
@@ -0,0 +1,892 @@
+/*
+ * Floating point emulation support for subnormalised numbers on the SH4
+ * architecture. This file is derived from the SoftFloat IEC/IEEE
+ * Floating-point Arithmetic Package, Release 2, the original license of
+ * which is reproduced below.
+ *
+ * ========================================================================
+ *
+ * This C source file is part of the SoftFloat IEC/IEEE Floating-point
+ * Arithmetic Package, Release 2.
+ *
+ * Written by John R. Hauser. This work was made possible in part by the
+ * International Computer Science Institute, located at Suite 600, 1947 Center
+ * Street, Berkeley, California 94704. Funding was partially provided by the
+ * National Science Foundation under grant MIP-9311980. The original version
+ * of this code was written as part of a project to build a fixed-point vector
+ * processor in collaboration with the University of California at Berkeley,
+ * overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+ * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
+ * arithmetic/softfloat.html'.
+ *
+ * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
+ * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
+ * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
+ * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
+ * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
+ *
+ * Derivative works are acceptable, even for commercial purposes, so long as
+ * (1) they include prominent notice that the work is derivative, and (2) they
+ * include prominent notice akin to these three paragraphs for those parts of
+ * this code that are retained.
+ *
+ * ========================================================================
+ *
+ * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com>
+ * and Kamel Khelifi <kamel.khelifi@st.com>
+ */
+#include <linux/kernel.h>
+#include <asm/cpu/fpu.h>
+
+#define LIT64( a ) a##LL
+
+typedef char flag;
+typedef unsigned char uint8;
+typedef signed char int8;
+typedef int uint16;
+typedef int int16;
+typedef unsigned int uint32;
+typedef signed int int32;
+
+typedef unsigned long long int bits64;
+typedef signed long long int sbits64;
+
+typedef unsigned char bits8;
+typedef signed char sbits8;
+typedef unsigned short int bits16;
+typedef signed short int sbits16;
+typedef unsigned int bits32;
+typedef signed int sbits32;
+
+typedef unsigned long long int uint64;
+typedef signed long long int int64;
+
+typedef unsigned long int float32;
+typedef unsigned long long float64;
+
+extern void float_raise(unsigned int flags); /* in fpu.c */
+extern int float_rounding_mode(void); /* in fpu.c */
+
+inline bits64 extractFloat64Frac(float64 a);
+inline flag extractFloat64Sign(float64 a);
+inline int16 extractFloat64Exp(float64 a);
+inline int16 extractFloat32Exp(float32 a);
+inline flag extractFloat32Sign(float32 a);
+inline bits32 extractFloat32Frac(float32 a);
+inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
+inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
+inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
+inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
+float64 float64_sub(float64 a, float64 b);
+float32 float32_sub(float32 a, float32 b);
+float32 float32_add(float32 a, float32 b);
+float64 float64_add(float64 a, float64 b);
+float64 float64_div(float64 a, float64 b);
+float32 float32_div(float32 a, float32 b);
+float32 float32_mul(float32 a, float32 b);
+float64 float64_mul(float64 a, float64 b);
+inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr);
+inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr);
+inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
+
+static int8 countLeadingZeros32(bits32 a);
+static int8 countLeadingZeros64(bits64 a);
+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
+ bits64 zSig);
+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
+ bits32 zSig);
+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
+static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
+ bits64 * zSigPtr);
+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
+ bits32 * zSigPtr);
+
+inline bits64 extractFloat64Frac(float64 a)
+{
+ return a & LIT64(0x000FFFFFFFFFFFFF);
+}
+
+inline flag extractFloat64Sign(float64 a)
+{
+ return a >> 63;
+}
+
+inline int16 extractFloat64Exp(float64 a)
+{
+ return (a >> 52) & 0x7FF;
+}
+
+inline int16 extractFloat32Exp(float32 a)
+{
+ return (a >> 23) & 0xFF;
+}
+
+inline flag extractFloat32Sign(float32 a)
+{
+ return a >> 31;
+}
+
+inline bits32 extractFloat32Frac(float32 a)
+{
+ return a & 0x007FFFFF;
+}
+
+inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
+}
+
+inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
+{
+ bits64 z;
+
+ if (count == 0) {
+ z = a;
+ } else if (count < 64) {
+ z = (a >> count) | ((a << ((-count) & 63)) != 0);
+ } else {
+ z = (a != 0);
+ }
+ *zPtr = z;
+}
+
+static int8 countLeadingZeros32(bits32 a)
+{
+ static const int8 countLeadingZerosHigh[] = {
+ 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ int8 shiftCount;
+
+ shiftCount = 0;
+ if (a < 0x10000) {
+ shiftCount += 16;
+ a <<= 16;
+ }
+ if (a < 0x1000000) {
+ shiftCount += 8;
+ a <<= 8;
+ }
+ shiftCount += countLeadingZerosHigh[a >> 24];
+ return shiftCount;
+
+}
+
+static int8 countLeadingZeros64(bits64 a)
+{
+ int8 shiftCount;
+
+ shiftCount = 0;
+ if (a < ((bits64) 1) << 32) {
+ shiftCount += 32;
+ } else {
+ a >>= 32;
+ }
+ shiftCount += countLeadingZeros32(a);
+ return shiftCount;
+
+}
+
+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros64(zSig) - 1;
+ return roundAndPackFloat64(zSign, zExp - shiftCount,
+ zSig << shiftCount);
+
+}
+
+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 10;
+ bSig <<= 10;
+ if (0 < expDiff)
+ goto aExpBigger;
+ if (expDiff < 0)
+ goto bExpBigger;
+ if (aExp == 0) {
+ aExp = 1;
+ bExp = 1;
+ }
+ if (bSig < aSig)
+ goto aBigger;
+ if (aSig < bSig)
+ goto bBigger;
+ return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
+ bExpBigger:
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign ^ 1, 0x7FF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= LIT64(0x4000000000000000);
+ }
+ shift64RightJamming(aSig, -expDiff, &aSig);
+ bSig |= LIT64(0x4000000000000000);
+ bBigger:
+ zSig = bSig - aSig;
+ zExp = bExp;
+ zSign ^= 1;
+ goto normalizeRoundAndPack;
+ aExpBigger:
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= LIT64(0x4000000000000000);
+ }
+ shift64RightJamming(bSig, expDiff, &bSig);
+ aSig |= LIT64(0x4000000000000000);
+ aBigger:
+ zSig = aSig - bSig;
+ zExp = aExp;
+ normalizeRoundAndPack:
+ --zExp;
+ return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
+
+}
+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 9;
+ bSig <<= 9;
+ if (0 < expDiff) {
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= LIT64(0x2000000000000000);
+ }
+ shift64RightJamming(bSig, expDiff, &bSig);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= LIT64(0x2000000000000000);
+ }
+ shift64RightJamming(aSig, -expDiff, &aSig);
+ zExp = bExp;
+ } else {
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (aExp == 0)
+ return packFloat64(zSign, 0, (aSig + bSig) >> 9);
+ zSig = LIT64(0x4000000000000000) + aSig + bSig;
+ zExp = aExp;
+ goto roundAndPack;
+ }
+ aSig |= LIT64(0x2000000000000000);
+ zSig = (aSig + bSig) << 1;
+ --zExp;
+ if ((sbits64) zSig < 0) {
+ zSig = aSig + bSig;
+ ++zExp;
+ }
+ roundAndPack:
+ return roundAndPackFloat64(zSign, zExp, zSig);
+
+}
+
+inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
+}
+
+inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
+{
+ bits32 z;
+ if (count == 0) {
+ z = a;
+ } else if (count < 32) {
+ z = (a >> count) | ((a << ((-count) & 31)) != 0);
+ } else {
+ z = (a != 0);
+ }
+ *zPtr = z;
+}
+
+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ flag roundNearestEven;
+ int8 roundIncrement, roundBits;
+ flag isTiny;
+
+ /* SH4 has only 2 rounding modes - round to nearest and round to zero */
+ roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
+ roundIncrement = 0x40;
+ if (!roundNearestEven) {
+ roundIncrement = 0;
+ }
+ roundBits = zSig & 0x7F;
+ if (0xFD <= (bits16) zExp) {
+ if ((0xFD < zExp)
+ || ((zExp == 0xFD)
+ && ((sbits32) (zSig + roundIncrement) < 0))
+ ) {
+ float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
+ return packFloat32(zSign, 0xFF,
+ 0) - (roundIncrement == 0);
+ }
+ if (zExp < 0) {
+ isTiny = (zExp < -1)
+ || (zSig + roundIncrement < 0x80000000);
+ shift32RightJamming(zSig, -zExp, &zSig);
+ zExp = 0;
+ roundBits = zSig & 0x7F;
+ if (isTiny && roundBits)
+ float_raise(FPSCR_CAUSE_UNDERFLOW);
+ }
+ }
+ if (roundBits)
+ float_raise(FPSCR_CAUSE_INEXACT);
+ zSig = (zSig + roundIncrement) >> 7;
+ zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
+ if (zSig == 0)
+ zExp = 0;
+ return packFloat32(zSign, zExp, zSig);
+
+}
+
+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros32(zSig) - 1;
+ return roundAndPackFloat32(zSign, zExp - shiftCount,
+ zSig << shiftCount);
+}
+
+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ flag roundNearestEven;
+ int16 roundIncrement, roundBits;
+ flag isTiny;
+
+ /* SH4 has only 2 rounding modes - round to nearest and round to zero */
+ roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
+ roundIncrement = 0x200;
+ if (!roundNearestEven) {
+ roundIncrement = 0;
+ }
+ roundBits = zSig & 0x3FF;
+ if (0x7FD <= (bits16) zExp) {
+ if ((0x7FD < zExp)
+ || ((zExp == 0x7FD)
+ && ((sbits64) (zSig + roundIncrement) < 0))
+ ) {
+ float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
+ return packFloat64(zSign, 0x7FF,
+ 0) - (roundIncrement == 0);
+ }
+ if (zExp < 0) {
+ isTiny = (zExp < -1)
+ || (zSig + roundIncrement <
+ LIT64(0x8000000000000000));
+ shift64RightJamming(zSig, -zExp, &zSig);
+ zExp = 0;
+ roundBits = zSig & 0x3FF;
+ if (isTiny && roundBits)
+ float_raise(FPSCR_CAUSE_UNDERFLOW);
+ }
+ }
+ if (roundBits)
+ float_raise(FPSCR_CAUSE_INEXACT);
+ zSig = (zSig + roundIncrement) >> 10;
+ zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
+ if (zSig == 0)
+ zExp = 0;
+ return packFloat64(zSign, zExp, zSig);
+
+}
+
+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 7;
+ bSig <<= 7;
+ if (0 < expDiff)
+ goto aExpBigger;
+ if (expDiff < 0)
+ goto bExpBigger;
+ if (aExp == 0) {
+ aExp = 1;
+ bExp = 1;
+ }
+ if (bSig < aSig)
+ goto aBigger;
+ if (aSig < bSig)
+ goto bBigger;
+ return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
+ bExpBigger:
+ if (bExp == 0xFF) {
+ return packFloat32(zSign ^ 1, 0xFF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= 0x40000000;
+ }
+ shift32RightJamming(aSig, -expDiff, &aSig);
+ bSig |= 0x40000000;
+ bBigger:
+ zSig = bSig - aSig;
+ zExp = bExp;
+ zSign ^= 1;
+ goto normalizeRoundAndPack;
+ aExpBigger:
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= 0x40000000;
+ }
+ shift32RightJamming(bSig, expDiff, &bSig);
+ aSig |= 0x40000000;
+ aBigger:
+ zSig = aSig - bSig;
+ zExp = aExp;
+ normalizeRoundAndPack:
+ --zExp;
+ return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 6;
+ bSig <<= 6;
+ if (0 < expDiff) {
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= 0x20000000;
+ }
+ shift32RightJamming(bSig, expDiff, &bSig);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0xFF) {
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= 0x20000000;
+ }
+ shift32RightJamming(aSig, -expDiff, &aSig);
+ zExp = bExp;
+ } else {
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (aExp == 0)
+ return packFloat32(zSign, 0, (aSig + bSig) >> 6);
+ zSig = 0x40000000 + aSig + bSig;
+ zExp = aExp;
+ goto roundAndPack;
+ }
+ aSig |= 0x20000000;
+ zSig = (aSig + bSig) << 1;
+ --zExp;
+ if ((sbits32) zSig < 0) {
+ zSig = aSig + bSig;
+ ++zExp;
+ }
+ roundAndPack:
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float64 float64_sub(float64 a, float64 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat64Sign(a);
+ bSign = extractFloat64Sign(b);
+ if (aSign == bSign) {
+ return subFloat64Sigs(a, b, aSign);
+ } else {
+ return addFloat64Sigs(a, b, aSign);
+ }
+
+}
+
+float32 float32_sub(float32 a, float32 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat32Sign(a);
+ bSign = extractFloat32Sign(b);
+ if (aSign == bSign) {
+ return subFloat32Sigs(a, b, aSign);
+ } else {
+ return addFloat32Sigs(a, b, aSign);
+ }
+
+}
+
+float32 float32_add(float32 a, float32 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat32Sign(a);
+ bSign = extractFloat32Sign(b);
+ if (aSign == bSign) {
+ return addFloat32Sigs(a, b, aSign);
+ } else {
+ return subFloat32Sigs(a, b, aSign);
+ }
+
+}
+
+float64 float64_add(float64 a, float64 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat64Sign(a);
+ bSign = extractFloat64Sign(b);
+ if (aSign == bSign) {
+ return addFloat64Sigs(a, b, aSign);
+ } else {
+ return subFloat64Sigs(a, b, aSign);
+ }
+}
+
+static void
+normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros64(aSig) - 11;
+ *zSigPtr = aSig << shiftCount;
+ *zExpPtr = 1 - shiftCount;
+}
+
+inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr)
+{
+ bits64 z1;
+
+ z1 = a1 + b1;
+ *z1Ptr = z1;
+ *z0Ptr = a0 + b0 + (z1 < a1);
+}
+
+inline void
+sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr)
+{
+ *z1Ptr = a1 - b1;
+ *z0Ptr = a0 - b0 - (a1 < b1);
+}
+
+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
+{
+ bits64 b0, b1;
+ bits64 rem0, rem1, term0, term1;
+ bits64 z;
+ if (b <= a0)
+ return LIT64(0xFFFFFFFFFFFFFFFF);
+ b0 = b >> 32;
+ z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
+ mul64To128(b, z, &term0, &term1);
+ sub128(a0, a1, term0, term1, &rem0, &rem1);
+ while (((sbits64) rem0) < 0) {
+ z -= LIT64(0x100000000);
+ b1 = b << 32;
+ add128(rem0, rem1, b0, b1, &rem0, &rem1);
+ }
+ rem0 = (rem0 << 32) | (rem1 >> 32);
+ z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
+ return z;
+}
+
+inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
+{
+ bits32 aHigh, aLow, bHigh, bLow;
+ bits64 z0, zMiddleA, zMiddleB, z1;
+
+ aLow = a;
+ aHigh = a >> 32;
+ bLow = b;
+ bHigh = b >> 32;
+ z1 = ((bits64) aLow) * bLow;
+ zMiddleA = ((bits64) aLow) * bHigh;
+ zMiddleB = ((bits64) aHigh) * bLow;
+ z0 = ((bits64) aHigh) * bHigh;
+ zMiddleA += zMiddleB;
+ z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
+ zMiddleA <<= 32;
+ z1 += zMiddleA;
+ z0 += (z1 < zMiddleA);
+ *z1Ptr = z1;
+ *z0Ptr = z0;
+
+}
+
+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
+ bits32 * zSigPtr)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros32(aSig) - 8;
+ *zSigPtr = aSig << shiftCount;
+ *zExpPtr = 1 - shiftCount;
+
+}
+
+float64 float64_div(float64 a, float64 b)
+{
+ flag aSign, bSign, zSign;
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ bits64 rem0, rem1;
+ bits64 term0, term1;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ aSign = extractFloat64Sign(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ bSign = extractFloat64Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0x7FF) {
+ if (bExp == 0x7FF) {
+ }
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign, 0, 0);
+ }
+ if (bExp == 0) {
+ if (bSig == 0) {
+ if ((aExp | aSig) == 0) {
+ float_raise(FPSCR_CAUSE_INVALID);
+ }
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ normalizeFloat64Subnormal(bSig, &bExp, &bSig);
+ }
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(aSig, &aExp, &aSig);
+ }
+ zExp = aExp - bExp + 0x3FD;
+ aSig = (aSig | LIT64(0x0010000000000000)) << 10;
+ bSig = (bSig | LIT64(0x0010000000000000)) << 11;
+ if (bSig <= (aSig + aSig)) {
+ aSig >>= 1;
+ ++zExp;
+ }
+ zSig = estimateDiv128To64(aSig, 0, bSig);
+ if ((zSig & 0x1FF) <= 2) {
+ mul64To128(bSig, zSig, &term0, &term1);
+ sub128(aSig, 0, term0, term1, &rem0, &rem1);
+ while ((sbits64) rem0 < 0) {
+ --zSig;
+ add128(rem0, rem1, 0, bSig, &rem0, &rem1);
+ }
+ zSig |= (rem1 != 0);
+ }
+ return roundAndPackFloat64(zSign, zExp, zSig);
+
+}
+
+float32 float32_div(float32 a, float32 b)
+{
+ flag aSign, bSign, zSign;
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ aSign = extractFloat32Sign(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ bSign = extractFloat32Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0xFF) {
+ if (bExp == 0xFF) {
+ }
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ if (bExp == 0xFF) {
+ return packFloat32(zSign, 0, 0);
+ }
+ if (bExp == 0) {
+ if (bSig == 0) {
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ normalizeFloat32Subnormal(bSig, &bExp, &bSig);
+ }
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(aSig, &aExp, &aSig);
+ }
+ zExp = aExp - bExp + 0x7D;
+ aSig = (aSig | 0x00800000) << 7;
+ bSig = (bSig | 0x00800000) << 8;
+ if (bSig <= (aSig + aSig)) {
+ aSig >>= 1;
+ ++zExp;
+ }
+ zSig = (((bits64) aSig) << 32) / bSig;
+ if ((zSig & 0x3F) == 0) {
+ zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
+ }
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float32 float32_mul(float32 a, float32 b)
+{
+ char aSign, bSign, zSign;
+ int aExp, bExp, zExp;
+ unsigned int aSig, bSig;
+ unsigned long long zSig64;
+ unsigned int zSig;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ aSign = extractFloat32Sign(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ bSign = extractFloat32Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(aSig, &aExp, &aSig);
+ }
+ if (bExp == 0) {
+ if (bSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(bSig, &bExp, &bSig);
+ }
+ if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
+ return roundAndPackFloat32(zSign, 0xff, 0);
+
+ zExp = aExp + bExp - 0x7F;
+ aSig = (aSig | 0x00800000) << 7;
+ bSig = (bSig | 0x00800000) << 8;
+ shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
+ zSig = zSig64;
+ if (0 <= (signed int)(zSig << 1)) {
+ zSig <<= 1;
+ --zExp;
+ }
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float64 float64_mul(float64 a, float64 b)
+{
+ char aSign, bSign, zSign;
+ int aExp, bExp, zExp;
+ unsigned long long int aSig, bSig, zSig0, zSig1;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ aSign = extractFloat64Sign(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ bSign = extractFloat64Sign(b);
+ zSign = aSign ^ bSign;
+
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(aSig, &aExp, &aSig);
+ }
+ if (bExp == 0) {
+ if (bSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(bSig, &bExp, &bSig);
+ }
+ if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
+ return roundAndPackFloat64(zSign, 0x7ff, 0);
+
+ zExp = aExp + bExp - 0x3FF;
+ aSig = (aSig | 0x0010000000000000LL) << 10;
+ bSig = (bSig | 0x0010000000000000LL) << 11;
+ mul64To128(aSig, bSig, &zSig0, &zSig1);
+ zSig0 |= (zSig1 != 0);
+ if (0 <= (signed long long int)(zSig0 << 1)) {
+ zSig0 <<= 1;
+ --zExp;
+ }
+ return roundAndPackFloat64(zSign, zExp, zSig0);
+}
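
Apart from two kernel includes and the float_raise()/float_rounding_mode() hooks, these routines are self-contained C, so the arithmetic can be spot-checked on a host with a small harness such as the following (hypothetical; assumes the kernel includes are stubbed out, that rounding mode 0 means round-to-nearest, and notes that the file types float32 as unsigned long, 32 bits on SH):

    #include <stdio.h>

    static unsigned int flags;
    void float_raise(unsigned int f) { flags |= f; }
    int float_rounding_mode(void) { return 0; } /* assume 0 == nearest */

    unsigned long float32_mul(unsigned long a, unsigned long b);

    int main(void)
    {
            unsigned long tiny = 0x00100000; /* subnormal, 2^-129 */
            unsigned long two  = 0x40000000; /* 2.0f */

            /* 2^-129 * 2 = 2^-128, still subnormal: expect 0x00200000 */
            printf("%08lx\n", float32_mul(tiny, two));
            return 0;
    }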
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index b22a78c807e..3008c00eea6 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -341,17 +341,18 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
unsigned int cpu = sysdev->id;
struct kobject *kobj;
+ int error;
sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (unlikely(!sq_kobject[cpu]))
return -ENOMEM;
kobj = sq_kobject[cpu];
- kobj->parent = &sysdev->kobj;
- kobject_set_name(kobj, "%s", "sq");
- kobj->ktype = &ktype_percpu_entry;
-
- return kobject_register(kobj);
+ error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
+ "%s", "sq");
+ if (!error)
+ kobject_uevent(kobj, KOBJ_ADD);
+ return error;
}
static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
@@ -359,7 +360,7 @@ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
unsigned int cpu = sysdev->id;
struct kobject *kobj = sq_kobject[cpu];
- kobject_unregister(kobj);
+ kobject_put(kobj);
return 0;
}
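
This hunk is the sq.c side of the tree-wide kobject API conversion: kobject_register(), which required name, parent and ktype to be filled in beforehand, gives way to kobject_init_and_add(), with the KOBJ_ADD uevent now sent explicitly on success, and teardown becomes a plain refcount drop. In general form (my_ktype/parent_kobj are placeholders):

    struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
    int error;

    if (!kobj)
            return -ENOMEM;
    error = kobject_init_and_add(kobj, &my_ktype, parent_kobj, "%s", "name");
    if (!error)
            kobject_uevent(kobj, KOBJ_ADD);
    /* ... */
    kobject_put(kobj);  /* final put frees through my_ktype->release() */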
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 24539873943..08ac6387bf1 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -3,6 +3,7 @@
#
# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
smp-$(CONFIG_CPU_SUBTYPE_SHX3) := smp-shx3.o
# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
new file mode 100644
index 00000000000..45889d412c8
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -0,0 +1,126 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+ *
+ * SH7763 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2007 Yoshihiro Shimoda
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
+static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
+static int p1fc_divisors[] = { 1, 1, 1, 16, 1, 1, 1, 1 };
+static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
+}
+
+static struct clk_ops sh7763_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
+ clk->rate = clk->parent->rate / p0fc_divisors[idx];
+}
+
+static struct clk_ops sh7763_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
+ clk->rate = clk->parent->rate / bfc_divisors[idx];
+}
+
+static struct clk_ops sh7763_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7763_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7763_clk_ops[] = {
+ &sh7763_master_clk_ops,
+ &sh7763_module_clk_ops,
+ &sh7763_bus_clk_ops,
+ &sh7763_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7763_clk_ops))
+ *ops = sh7763_clk_ops[idx];
+}
+
+static void shyway_clk_recalc(struct clk *clk)
+{
+ int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
+ clk->rate = clk->parent->rate / cfc_divisors[idx];
+}
+
+static struct clk_ops sh7763_shyway_clk_ops = {
+ .recalc = shyway_clk_recalc,
+};
+
+static struct clk sh7763_shyway_clk = {
+ .name = "shyway_clk",
+ .flags = CLK_ALWAYS_ENABLED,
+ .ops = &sh7763_shyway_clk_ops,
+};
+
+/*
+ * Additional SH7763-specific on-chip clocks that aren't already part of the
+ * clock framework
+ */
+static struct clk *sh7763_onchip_clocks[] = {
+ &sh7763_shyway_clk,
+};
+
+static int __init sh7763_clk_init(void)
+{
+ struct clk *clk = clk_get(NULL, "master_clk");
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
+ struct clk *clkp = sh7763_onchip_clocks[i];
+
+ clkp->parent = clk;
+ clk_register(clkp);
+ clk_enable(clkp);
+ }
+
+ /*
+ * Now that we have the rest of the clocks registered, we need to
+ * force the parent clock to propagate so that these clocks will
+ * automatically figure out their rate. We cheat by handing the
+ * parent clock its current rate and forcing child propagation.
+ */
+ clk_set_rate(clk, clk_get_rate(clk));
+
+ clk_put(clk);
+
+ return 0;
+}
+
+arch_initcall(sh7763_clk_init);
+
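arch_init_clk_ops() above is the per-CPU hook through which the generic SH clock framework attaches these operations to its standard clocks; the index selects one entry of sh7763_clk_ops[] in registration order (master, module, bus, cpu here). A rough caller-side sketch under that assumption (illustrative, not the framework's actual code):

	#include <asm/clock.h>

	static struct clk *std_clks[4];	/* master, module, bus, cpu (assumed) */

	static void __init attach_cpu_clk_ops(void)
	{
		int i;

		for (i = 0; i < 4; i++) {
			struct clk_ops *ops = NULL;

			arch_init_clk_ops(&ops, i);	/* CPU fills this in */
			if (ops)
				std_clks[i]->ops = ops;
		}
	}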
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index b9c6547c4a9..73c778d40d1 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -157,14 +157,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(TMU0, 2),
- INTC_PRIO(TMU1, 2),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ } },
@@ -217,7 +209,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups,
mask_registers, prio_registers, sense_registers);
void __init plat_irq_setup(void)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
new file mode 100644
index 00000000000..eabd5386812
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -0,0 +1,390 @@
+/*
+ * SH7763 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2007 Yoshihiro Shimoda
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+#include <asm/sci.h>
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffe80000,
+ .end = 0xffe80000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 21,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 22,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ }, {
+ .mapbase = 0xffe08000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 77, 79, 78 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xffec8000,
+ .end = 0xffec80ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 83,
+ .end = 83,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 usb_ohci_dma_mask = 0xffffffffUL;
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct resource usbf_resources[] = {
+ [0] = {
+ .start = 0xffec0000,
+ .end = 0xffec00ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 84,
+ .end = 84,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbf_device = {
+ .name = "sh_udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usbf_resources),
+ .resource = usbf_resources,
+};
+
+static struct platform_device *sh7763_devices[] __initdata = {
+ &rtc_device,
+ &sci_device,
+ &usb_ohci_device,
+ &usbf_device,
+};
+
+static int __init sh7763_devices_setup(void)
+{
+ return platform_add_devices(sh7763_devices,
+ ARRAY_SIZE(sh7763_devices));
+}
+__initcall(sh7763_devices_setup);
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ RTC_ATI, RTC_PRI, RTC_CUI,
+ WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, LCDC,
+ DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
+ SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
+ DMAC0_DMINT4, DMAC0_DMINT5,
+ IIC0, IIC1,
+ CMT,
+ GEINT0, GEINT1, GEINT2,
+ HAC,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
+ PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
+ STIF0, STIF1,
+ SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
+ SIOF0, SIOF1, SIOF2,
+ USBH, USBFI0, USBFI1,
+ TPU, PCC,
+ MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
+ SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND,
+ TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
+ SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
+ GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3,
+
+ /* interrupt groups */
+
+ TMU012, TMU345, RTC, DMAC, SCIF0, GETHER, PCIC5,
+ SCIF1, USBF, MMCIF, SIM, SCIF2, GPIO,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
+ INTC_VECT(RTC_CUI, 0x4c0),
+ INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
+ INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
+ INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
+ INTC_VECT(LCDC, 0x620),
+ INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
+ INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
+ INTC_VECT(DMAC0_DMAE, 0x6c0),
+ INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
+ INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
+ INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
+ INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0),
+ INTC_VECT(CMT, 0x900), INTC_VECT(GEINT0, 0x920),
+ INTC_VECT(GEINT1, 0x940), INTC_VECT(GEINT2, 0x960),
+ INTC_VECT(HAC, 0x980),
+ INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
+ INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
+ INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
+ INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
+ INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
+ INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
+ INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
+ INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
+ INTC_VECT(USBH, 0xc60), INTC_VECT(USBFI0, 0xc80),
+ INTC_VECT(USBFI1, 0xca0),
+ INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
+ INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
+ INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
+ INTC_VECT(SIM_ERI, 0xd80), INTC_VECT(SIM_RXI, 0xda0),
+ INTC_VECT(SIM_TXI, 0xdc0), INTC_VECT(SIM_TEND, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
+ INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
+ INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
+	INTC_VECT(SCIF2_ERI, 0xf00), INTC_VECT(SCIF2_RXI, 0xf20),
+	INTC_VECT(SCIF2_BRI, 0xf40), INTC_VECT(SCIF2_TXI, 0xf60),
+ INTC_VECT(GPIO_CH0, 0xf80), INTC_VECT(GPIO_CH1, 0xfa0),
+ INTC_VECT(GPIO_CH2, 0xfc0), INTC_VECT(GPIO_CH3, 0xfe0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+ INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
+ INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
+ DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
+ INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
+ INTC_GROUP(GETHER, GEINT0, GEINT1, GEINT2),
+ INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
+ INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
+ INTC_GROUP(USBF, USBFI0, USBFI1),
+ INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
+ INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND),
+ INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
+ INTC_GROUP(GPIO, GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3),
+};
+
+static struct intc_prio priorities[] __initdata = {
+ INTC_PRIO(SCIF0, 3),
+ INTC_PRIO(SCIF1, 3),
+ INTC_PRIO(SCIF2, 3),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, 0, 0, 0, GPIO, 0,
+ SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB,
+ PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC,
+ HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
+ { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
+ { 0, 0, 0, 0, 0, 0, SCIF2, USBF,
+ 0, 0, STIF1, STIF0, 0, 0, USBH, GETHER,
+ PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1,
+ LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
+ TMU2, TMU2_TICPI } },
+ { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
+ { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
+ { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } },
+ { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
+ PCISERR, PCIINTA } },
+ { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
+ PCIINTD, PCIC5 } },
+ { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } },
+ { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } },
+ { 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } },
+ { 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } },
+ { 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } },
+ { 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } },
+ { 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } },
+ { 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups, priorities,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+
+static struct intc_vect irq_vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+ INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+};
+
+static struct intc_mask_reg irq_mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg irq_prio_registers[] __initdata = {
+ { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg irq_sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC(intc_irq_desc, "sh7763-irq", irq_vectors,
+ NULL, NULL, irq_mask_registers, irq_prio_registers,
+ irq_sense_registers);
+
+/* External interrupt pins in IRL mode */
+
+static struct intc_vect irl_vectors[] __initdata = {
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
+};
+
+static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
+ NULL, NULL, irl7654_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
+ NULL, NULL, irl3210_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ7-0 */
+ ctrl_outl(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ ctrl_outl(0xc0000000, INTC_INTMSK1);
+ ctrl_outl(0xfffefffe, INTC_INTMSK2);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ /* select IRQ mode for IRL3-0 + IRL7-4 */
+ ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ register_intc_controller(&intc_irq_desc);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl7654_desc);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl3210_desc);
+ break;
+ default:
+ BUG();
+ }
+}
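
The IRL_* vector table above encodes the four external interrupt request pins as one 4-bit level: reading the enum letters as bits (L = 0, H = 1, leftmost bit most significant), vectors run from 0x200 for IRL_LLLL in steps of 0x20 up to 0x3c0 for IRL_HHHL, and the all-high pattern 0xf means "no interrupt pending", which is why only 15 of the 16 combinations get a vector. In C terms:

	/* Illustrative: 4-bit IRL pin pattern -> exception vector,
	 * matching the irl_vectors[] table above. */
	static inline unsigned int irl_to_vector(unsigned int irl)
	{
		return irl == 0xf ? 0 : 0x200 + irl * 0x20; /* 0xf: none */
	}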
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index e8fd33ff060..293004b526f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -168,11 +168,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
@@ -195,7 +190,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
@@ -223,7 +218,7 @@ static struct intc_sense_reg irq_sense_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_irq_desc, "sh7780-irq", irq_vectors,
- NULL, NULL, irq_mask_registers, irq_prio_registers,
+ NULL, irq_mask_registers, irq_prio_registers,
irq_sense_registers);
/* External interrupt pins in IRL mode */
@@ -257,10 +252,10 @@ static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
- NULL, NULL, irl7654_mask_registers, NULL, NULL);
+ NULL, irl7654_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
- NULL, NULL, irl3210_mask_registers, NULL, NULL);
+ NULL, irl3210_mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 39b215d6cee..74b60e96cdf 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -178,15 +178,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF3, 3),
- INTC_PRIO(SCIF4, 3),
- INTC_PRIO(SCIF5, 3),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
@@ -227,7 +218,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
@@ -248,11 +239,11 @@ static struct intc_sense_reg sense_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq0123, "sh7785-irq0123", vectors_irq0123,
- NULL, NULL, mask_registers, prio_registers,
+ NULL, mask_registers, prio_registers,
sense_registers);
static DECLARE_INTC_DESC(intc_desc_irq4567, "sh7785-irq4567", vectors_irq4567,
- NULL, NULL, mask_registers, prio_registers,
+ NULL, mask_registers, prio_registers,
sense_registers);
/* External interrupt pins in IRL mode */
@@ -280,10 +271,10 @@ static struct intc_vect vectors_irl4567[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123,
- NULL, NULL, mask_registers, NULL, NULL);
+ NULL, mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
- NULL, NULL, mask_registers, NULL, NULL);
+ NULL, mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index c6cdd7e3b04..4dc958b6b31 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -165,13 +165,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF3, 3),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3 } },
@@ -218,7 +211,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) },
};
-static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
@@ -232,8 +225,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
- priorities, mask_registers, prio_registers,
- sense_registers);
+ mask_registers, prio_registers, sense_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl[] __initdata = {
@@ -248,7 +240,7 @@ static struct intc_vect vectors_irl[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
void __init plat_irq_setup_pins(int mode)
{
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
new file mode 100644
index 00000000000..8646363e9de
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux/SuperH SH-5 backends.
+#
+obj-y := entry.o probe.o switchto.o
+
+obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_KALLSYMS) += unwind.o
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
new file mode 100644
index 00000000000..ba8750176d9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -0,0 +1,2101 @@
+/*
+ * arch/sh/kernel/cpu/sh5/entry.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/sys.h>
+#include <asm/cpu/registers.h>
+#include <asm/processor.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * SR fields.
+ */
+#define SR_ASID_MASK 0x00ff0000
+#define SR_FD_MASK 0x00008000
+#define SR_SS 0x08000000
+#define SR_BL 0x10000000
+#define SR_MD 0x40000000
+
+/*
+ * Event code.
+ */
+#define EVENT_INTERRUPT 0
+#define EVENT_FAULT_TLB 1
+#define EVENT_FAULT_NOT_TLB 2
+#define EVENT_DEBUG 3
+
+/* EXPEVT values */
+#define RESET_CAUSE 0x20
+#define DEBUGSS_CAUSE 0x980
+
+/*
+ * Frame layout. Quad index.
+ */
+#define FRAME_T(x) FRAME_TBASE+(x*8)
+#define FRAME_R(x) FRAME_RBASE+(x*8)
+#define FRAME_S(x) FRAME_SBASE+(x*8)
+#define FSPC 0
+#define FSSR 1
+#define FSYSCALL_ID 2
+
+/* Arrange the save frame to be a multiple of 32 bytes long */
+#define FRAME_SBASE 0
+#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
+#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
+#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
+#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
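+
+/* For reference, the BASIC frame the FRAME_* macros above describe maps to
+ * the following C-style layout (offsets follow from the definitions; the
+ * struct itself is purely illustrative and is not defined anywhere):
+ *
+ *	struct sh5_frame {
+ *		long long s[3];		offset   0: SPC, SSR, SYSCALL_ID
+ *		long long r[63];	offset  24: r0 - r62
+ *		long long t[8];		offset 528: tr0 - tr7
+ *		long long pad[2];	offset 592: pad out to 608 bytes
+ *	};
+ *
+ * FRAME_SIZE = 608 = 19*32, a multiple of 32 bytes as required above.
+ */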
+
+#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
+#define FP_FRAME_BASE 0
+
+#define SAVED_R2 0*8
+#define SAVED_R3 1*8
+#define SAVED_R4 2*8
+#define SAVED_R5 3*8
+#define SAVED_R18 4*8
+#define SAVED_R6 5*8
+#define SAVED_TR0 6*8
+
+/* These are the registers saved in the TLB path that aren't saved in the first
+ level of the normal one. */
+#define TLB_SAVED_R25 7*8
+#define TLB_SAVED_TR1 8*8
+#define TLB_SAVED_TR2 9*8
+#define TLB_SAVED_TR3 10*8
+#define TLB_SAVED_TR4 11*8
+/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1, causing
+   breakage otherwise. */
+#define TLB_SAVED_R0 12*8
+#define TLB_SAVED_R1 13*8
+
+#define CLI() \
+ getcon SR, r6; \
+ ori r6, 0xf0, r6; \
+ putcon r6, SR;
+
+#define STI() \
+ getcon SR, r6; \
+ andi r6, ~0xf0, r6; \
+ putcon r6, SR;
+
+#ifdef CONFIG_PREEMPT
+# define preempt_stop() CLI()
+#else
+# define preempt_stop()
+# define resume_kernel restore_all
+#endif
+
+ .section .data, "aw"
+
+#define FAST_TLBMISS_STACK_CACHELINES 4
+#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
+
+/* Register back-up area for all exceptions */
+ .balign 32
+ /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
+ * register saves etc. */
+ .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
+/* This is 32 byte aligned by construction */
+/* Register back-up area for all exceptions */
+reg_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+
+/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
+ * reentrancy. Note this area may be accessed via physical address.
+ * Align so this fits a whole single cache line, for ease of purging.
+ */
+ .balign 32,0,32
+resvec_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .balign 32,0,32
+
+/* Jump table of 3rd level handlers */
+trap_jtable:
+ .long do_exception_error /* 0x000 */
+ .long do_exception_error /* 0x020 */
+ .long tlb_miss_load /* 0x040 */
+ .long tlb_miss_store /* 0x060 */
+ ! ARTIFICIAL pseudo-EXPEVT setting
+ .long do_debug_interrupt /* 0x080 */
+ .long tlb_miss_load /* 0x0A0 */
+ .long tlb_miss_store /* 0x0C0 */
+ .long do_address_error_load /* 0x0E0 */
+ .long do_address_error_store /* 0x100 */
+#ifdef CONFIG_SH_FPU
+ .long do_fpu_error /* 0x120 */
+#else
+ .long do_exception_error /* 0x120 */
+#endif
+ .long do_exception_error /* 0x140 */
+ .long system_call /* 0x160 */
+ .long do_reserved_inst /* 0x180 */
+ .long do_illegal_slot_inst /* 0x1A0 */
+ .long do_exception_error /* 0x1C0 - NMI */
+ .long do_exception_error /* 0x1E0 */
+ .rept 15
+ .long do_IRQ /* 0x200 - 0x3C0 */
+ .endr
+ .long do_exception_error /* 0x3E0 */
+ .rept 32
+ .long do_IRQ /* 0x400 - 0x7E0 */
+ .endr
+ .long fpu_error_or_IRQA /* 0x800 */
+ .long fpu_error_or_IRQB /* 0x820 */
+ .long do_IRQ /* 0x840 */
+ .long do_IRQ /* 0x860 */
+ .rept 6
+ .long do_exception_error /* 0x880 - 0x920 */
+ .endr
+ .long do_software_break_point /* 0x940 */
+ .long do_exception_error /* 0x960 */
+ .long do_single_step /* 0x980 */
+
+ .rept 3
+ .long do_exception_error /* 0x9A0 - 0x9E0 */
+ .endr
+ .long do_IRQ /* 0xA00 */
+ .long do_IRQ /* 0xA20 */
+ .long itlb_miss_or_IRQ /* 0xA40 */
+ .long do_IRQ /* 0xA60 */
+ .long do_IRQ /* 0xA80 */
+ .long itlb_miss_or_IRQ /* 0xAA0 */
+ .long do_exception_error /* 0xAC0 */
+ .long do_address_error_exec /* 0xAE0 */
+ .rept 8
+ .long do_exception_error /* 0xB00 - 0xBE0 */
+ .endr
+ .rept 18
+ .long do_IRQ /* 0xC00 - 0xE20 */
+ .endr
+
+ .section .text64, "ax"
+
+/*
+ * --- Exception/Interrupt/Event Handling Section
+ */
+
+/*
+ * VBR and RESVEC blocks.
+ *
+ * First level handler for VBR-based exceptions.
+ *
+ * To avoid waste of space, align to the maximum text block size.
+ * This is assumed to be at most 128 bytes or 32 instructions.
+ * DO NOT EXCEED 32 instructions on the first level handlers !
+ *
+ * Also note that RESVEC is contained within the VBR block
+ * where the room left (1KB - TEXT_SIZE) allows placing
+ * the RESVEC block (at most 512B + TEXT_SIZE).
+ *
+ * So this is also the first (and only) level handler for RESVEC-based
+ * exceptions.
+ *
+ * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
+ * and interrupt) we are very tight on register space until we save
+ * onto the stack frame, which is done in handle_exception().
+ *
+ */
+
+#define TEXT_SIZE 128
+#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
+
+ .balign TEXT_SIZE
+LVBR_block:
+ .space 256, 0 /* Power-on class handler, */
+ /* not required here */
+not_a_tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+ ! VBR+0x200
+ nop
+ .balign 256
+ ! VBR+0x300
+ nop
+ .balign 256
+ /*
+	 * Instead of the natural .balign 1024, place RESVEC here,
+	 * respecting the final 1KB alignment.
+ */
+ .balign TEXT_SIZE
+ /*
+	 * Instead of '.space 1024-TEXT_SIZE', place the RESVEC
+	 * block, making sure the final alignment is correct.
+ */
+tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, KCR1
+ movi reg_save_area, SP
+ /* SP is guaranteed 32-byte aligned. */
+ st.q SP, TLB_SAVED_R0 , r0
+ st.q SP, TLB_SAVED_R1 , r1
+ st.q SP, SAVED_R2 , r2
+ st.q SP, SAVED_R3 , r3
+ st.q SP, SAVED_R4 , r4
+ st.q SP, SAVED_R5 , r5
+ st.q SP, SAVED_R6 , r6
+ st.q SP, SAVED_R18, r18
+
+ /* Save R25 for safety; as/ld may want to use it to achieve the call to
+ * the code in mm/tlbmiss.c */
+ st.q SP, TLB_SAVED_R25, r25
+ gettr tr0, r2
+ gettr tr1, r3
+ gettr tr2, r4
+ gettr tr3, r5
+ gettr tr4, r18
+ st.q SP, SAVED_TR0 , r2
+ st.q SP, TLB_SAVED_TR1 , r3
+ st.q SP, TLB_SAVED_TR2 , r4
+ st.q SP, TLB_SAVED_TR3 , r5
+ st.q SP, TLB_SAVED_TR4 , r18
+
+ pt do_fast_page_fault, tr0
+ getcon SSR, r2
+ getcon EXPEVT, r3
+ getcon TEA, r4
+ shlri r2, 30, r2
+ andi r2, 1, r2 /* r2 = SSR.MD */
+ blink tr0, LINK
+
+ pt fixup_to_invoke_general_handler, tr1
+
+	/* If the fast path handler fixed the fault, just drop through
+	   to the restore code and return to the excepting context.
+ */
+ beqi/u r2, 0, tr1
+
+fast_tlb_miss_restore:
+ ld.q SP, SAVED_TR0, r2
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+
+ ptabs r2, tr0
+ ptabs r3, tr1
+ ptabs r4, tr2
+ ptabs r5, tr3
+ ptabs r18, tr4
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+ ld.q SP, SAVED_R2, r2
+ ld.q SP, SAVED_R3, r3
+ ld.q SP, SAVED_R4, r4
+ ld.q SP, SAVED_R5, r5
+ ld.q SP, SAVED_R6, r6
+ ld.q SP, SAVED_R18, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ getcon KCR1, SP
+ rte
+ nop /* for safety, in case the code is run on sh5-101 cut1.x */
+
+fixup_to_invoke_general_handler:
+
+ /* OK, new method. Restore stuff that's not expected to get saved into
+ the 'first-level' reg save area, then just fall through to setting
+ up the registers and calling the second-level handler. */
+
+ /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
+ r25,tr1-4 and save r6 to get into the right state. */
+
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+
+ ptabs/u r3, tr1
+ ptabs/u r4, tr2
+ ptabs/u r5, tr3
+ ptabs/u r18, tr4
+
+ /* Set args for Non-debug, TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
+ DOES END UP AT VBR+0x600 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .balign 256
+ /* VBR + 0x600 */
+
+interrupt:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for interrupt class handler */
+ getcon INTEVT, r2
+ movi ret_from_irq, r3
+ ori r3, 1, r3
+ movi EVENT_INTERRUPT, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+ .balign TEXT_SIZE /* let's waste the bare minimum */
+
+LVBR_block_end: /* Marker. Used for total checking */
+
+ .balign 256
+LRESVEC_block:
+ /* Panic handler. Called with MMU off. Possible causes/actions:
+ * - Reset: Jump to program start.
+ * - Single Step: Turn off Single Step & return.
+ * - Others: Call panic handler, passing PC as arg.
+ * (this may need to be extended...)
+ */
+reset_or_panic:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, DCR
+ /* First save r0-1 and tr0, as we need to use these */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ gettr tr0, r0
+ st.q SP, 32, r0
+
+ /* Check cause */
+ getcon EXPEVT, r0
+ movi RESET_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if reset */
+ movi _stext-CONFIG_PAGE_OFFSET, r0
+ ori r0, 1, r0
+ ptabs r0, tr0
+ beqi r1, 0, tr0 /* Jump to start address if reset */
+
+ getcon EXPEVT, r0
+ movi DEBUGSS_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if single step */
+ pta single_step_panic, tr0
+ beqi r1, 0, tr0 /* jump if single step */
+
+ /* Now jump to where we save the registers. */
+ movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ blink tr0, r63
+
+single_step_panic:
+ /* We are in a handler with Single Step set. We need to resume the
+ * handler, by turning on MMU & turning off Single Step. */
+ getcon SSR, r0
+ movi SR_MMU, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Restore EXPEVT, as the rte won't do this */
+ getcon PEXPEVT, r0
+ putcon r0, EXPEVT
+ /* Restore regs */
+ ld.q SP, 32, r0
+ ptabs r0, tr0
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+ getcon DCR, SP
+ synco
+ rte
+
+
+ .balign 256
+debug_exception:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /*
+ * Single step/software_break_point first level handler.
+ * Called with MMU off, so the first thing we do is enable it
+ * by doing an rte with appropriate SSR.
+ */
+ putcon SP, DCR
+ /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+
+ /* With the MMU off, we are bypassing the cache, so purge any
+ * data that will be made stale by the following stores.
+ */
+ ocbp SP, 0
+ synco
+
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ getcon SPC, r0
+ st.q SP, 16, r0
+ getcon SSR, r0
+ st.q SP, 24, r0
+
+ /* Enable MMU, block exceptions, set priv mode, disable single step */
+ movi SR_MMU | SR_BL | SR_MD, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Force control to debug_exception_2 when rte is executed */
+	movi	debug_exception_2, r0
+ ori r0, 1, r0 /* force SHmedia, just in case */
+ putcon r0, SPC
+ getcon DCR, SP
+ synco
+ rte
+debug_exception_2:
+ /* Restore saved regs */
+ putcon SP, KCR1
+ movi resvec_save_area, SP
+ ld.q SP, 24, r0
+ putcon r0, SSR
+ ld.q SP, 16, r0
+ putcon r0, SPC
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for debug class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_DEBUG, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+debug_interrupt:
+ /* !!! WE COME HERE IN REAL MODE !!! */
+ /* Hook-up debug interrupt to allow various debugging options to be
+ * hooked into its handler. */
+ /* Save original stack pointer into KCR1 */
+ synco
+ putcon SP, KCR1
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ ocbp SP, 0
+ ocbp SP, 32
+ synco
+
+ /* Save other original registers into reg_save_area thru real addresses */
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* move (spc,ssr)->(pspc,pssr). The rte will shift
+ them back again, so that they look like the originals
+ as far as the real handler code is concerned. */
+ getcon spc, r6
+ putcon r6, pspc
+ getcon ssr, r6
+ putcon r6, pssr
+
+ ! construct useful SR for handle_exception
+ movi 3, r6
+ shlli r6, 30, r6
+ getcon sr, r18
+ or r18, r6, r6
+ putcon r6, ssr
+
+ ! SSR is now the current SR with the MD and MMU bits set
+ ! i.e. the rte will switch back to priv mode and put
+ ! the mmu back on
+
+ ! construct spc
+ movi handle_exception, r18
+ ori r18, 1, r18 ! for safety (do we need this?)
+ putcon r18, spc
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+
+ ! EXPEVT==0x80 is unused, so 'steal' this value to put the
+ ! debug interrupt handler in the vectoring table
+ movi 0x80, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+
+ or SP, ZERO, r5
+ movi CONFIG_PAGE_OFFSET, r6
+ add r6, r5, r5
+ getcon KCR1, SP
+
+ synco ! for safety
+ rte ! -> handle_exception, switch back to priv mode again
+
+LRESVEC_block_end: /* Marker. Unused. */
+
+ .balign TEXT_SIZE
+
+/*
+ * Second level handler for VBR-based exceptions. Pre-handler.
+ * In common to all stack-frame sensitive handlers.
+ *
+ * Inputs:
+ * (KCR0) Current [current task union]
+ * (KCR1) Original SP
+ * (r2) INTEVT/EXPEVT
+ * (r3) appropriate return address
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3 = debug)
+ * (r5) Pointer to reg_save_area
+ * (SP) Original SP
+ *
+ * Available registers:
+ * (r6)
+ * (r18)
+ * (tr0)
+ *
+ */
+handle_exception:
+ /* Common 2nd level handler. */
+
+ /* First thing we need an appropriate stack pointer */
+ getcon SSR, r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta stack_ok, tr0
+ bne r6, ZERO, tr0 /* Original stack pointer is fine */
+
+ /* Set stack pointer for user fault */
+ getcon KCR0, SP
+ movi THREAD_SIZE, r6 /* Point to the end */
+ add SP, r6, SP
+
+stack_ok:
+
+/* DEBUG : check for underflow/overflow of the kernel stack */
+ pta no_underflow, tr0
+ getcon KCR0, r6
+ movi 1024, r18
+ add r6, r18, r6
+ bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
+
+/* Just panic to cause a crash. */
+bad_sp:
+ ld.b r63, 0, r6
+ nop
+
+no_underflow:
+ pta bad_sp, tr0
+ getcon kcr0, r6
+ movi THREAD_SIZE, r18
+ add r18, r6, r6
+ bgt SP, r6, tr0 ! sp above the stack
+
+ /* Make some room for the BASIC frame. */
+ movi -(FRAME_SIZE), r6
+ add SP, r6, SP
+
+/* Could do this with no stalling if we had another spare register, but the
+ code below will be OK. */
+ ld.q r5, SAVED_R2, r6
+ ld.q r5, SAVED_R3, r18
+ st.q SP, FRAME_R(2), r6
+ ld.q r5, SAVED_R4, r6
+ st.q SP, FRAME_R(3), r18
+ ld.q r5, SAVED_R5, r18
+ st.q SP, FRAME_R(4), r6
+ ld.q r5, SAVED_R6, r6
+ st.q SP, FRAME_R(5), r18
+ ld.q r5, SAVED_R18, r18
+ st.q SP, FRAME_R(6), r6
+ ld.q r5, SAVED_TR0, r6
+ st.q SP, FRAME_R(18), r18
+ st.q SP, FRAME_T(0), r6
+
+ /* Keep old SP around */
+ getcon KCR1, r6
+
+ /* Save the rest of the general purpose registers */
+ st.q SP, FRAME_R(0), r0
+ st.q SP, FRAME_R(1), r1
+ st.q SP, FRAME_R(7), r7
+ st.q SP, FRAME_R(8), r8
+ st.q SP, FRAME_R(9), r9
+ st.q SP, FRAME_R(10), r10
+ st.q SP, FRAME_R(11), r11
+ st.q SP, FRAME_R(12), r12
+ st.q SP, FRAME_R(13), r13
+ st.q SP, FRAME_R(14), r14
+
+ /* SP is somewhere else */
+ st.q SP, FRAME_R(15), r6
+
+ st.q SP, FRAME_R(16), r16
+ st.q SP, FRAME_R(17), r17
+ /* r18 is saved earlier. */
+ st.q SP, FRAME_R(19), r19
+ st.q SP, FRAME_R(20), r20
+ st.q SP, FRAME_R(21), r21
+ st.q SP, FRAME_R(22), r22
+ st.q SP, FRAME_R(23), r23
+ st.q SP, FRAME_R(24), r24
+ st.q SP, FRAME_R(25), r25
+ st.q SP, FRAME_R(26), r26
+ st.q SP, FRAME_R(27), r27
+ st.q SP, FRAME_R(28), r28
+ st.q SP, FRAME_R(29), r29
+ st.q SP, FRAME_R(30), r30
+ st.q SP, FRAME_R(31), r31
+ st.q SP, FRAME_R(32), r32
+ st.q SP, FRAME_R(33), r33
+ st.q SP, FRAME_R(34), r34
+ st.q SP, FRAME_R(35), r35
+ st.q SP, FRAME_R(36), r36
+ st.q SP, FRAME_R(37), r37
+ st.q SP, FRAME_R(38), r38
+ st.q SP, FRAME_R(39), r39
+ st.q SP, FRAME_R(40), r40
+ st.q SP, FRAME_R(41), r41
+ st.q SP, FRAME_R(42), r42
+ st.q SP, FRAME_R(43), r43
+ st.q SP, FRAME_R(44), r44
+ st.q SP, FRAME_R(45), r45
+ st.q SP, FRAME_R(46), r46
+ st.q SP, FRAME_R(47), r47
+ st.q SP, FRAME_R(48), r48
+ st.q SP, FRAME_R(49), r49
+ st.q SP, FRAME_R(50), r50
+ st.q SP, FRAME_R(51), r51
+ st.q SP, FRAME_R(52), r52
+ st.q SP, FRAME_R(53), r53
+ st.q SP, FRAME_R(54), r54
+ st.q SP, FRAME_R(55), r55
+ st.q SP, FRAME_R(56), r56
+ st.q SP, FRAME_R(57), r57
+ st.q SP, FRAME_R(58), r58
+ st.q SP, FRAME_R(59), r59
+ st.q SP, FRAME_R(60), r60
+ st.q SP, FRAME_R(61), r61
+ st.q SP, FRAME_R(62), r62
+
+ /*
+ * Save the S* registers.
+ */
+ getcon SSR, r61
+ st.q SP, FRAME_S(FSSR), r61
+ getcon SPC, r62
+ st.q SP, FRAME_S(FSPC), r62
+ movi -1, r62 /* Reset syscall_nr */
+ st.q SP, FRAME_S(FSYSCALL_ID), r62
+
+ /* Save the rest of the target registers */
+ gettr tr1, r6
+ st.q SP, FRAME_T(1), r6
+ gettr tr2, r6
+ st.q SP, FRAME_T(2), r6
+ gettr tr3, r6
+ st.q SP, FRAME_T(3), r6
+ gettr tr4, r6
+ st.q SP, FRAME_T(4), r6
+ gettr tr5, r6
+ st.q SP, FRAME_T(5), r6
+ gettr tr6, r6
+ st.q SP, FRAME_T(6), r6
+ gettr tr7, r6
+ st.q SP, FRAME_T(7), r6
+
+ ! setup FP so that unwinder can wind back through nested kernel mode
+ ! exceptions
+ add SP, ZERO, r14
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* We've pushed all the registers now, so only r2-r4 hold anything
+ * useful. Move them into callee save registers */
+ or r2, ZERO, r28
+ or r3, ZERO, r29
+ or r4, ZERO, r30
+
+ /* Preserve r2 as the event code */
+ movi evt_debug, r3
+ ori r3, 1, r3
+ ptabs r3, tr0
+
+ or SP, ZERO, r6
+ getcon TRA, r5
+ blink tr0, LINK
+
+ or r28, ZERO, r2
+ or r29, ZERO, r3
+ or r30, ZERO, r4
+#endif
+
+ /* For syscall and debug race condition, get TRA now */
+ getcon TRA, r5
+
+ /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
+ * Also set FD, to catch FPU usage in the kernel.
+ *
+ * benedict.gaster@superh.com 29/07/2002
+ *
+ * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
+ * same time change BL from 1->0, as any pending interrupt of a level
+	 * higher than the previous value of IMASK will leak through and be
+ * taken unexpectedly.
+ *
+ * To avoid this we raise the IMASK and then issue another PUTCON to
+ * enable interrupts.
+ */
+ getcon SR, r6
+ movi SR_IMASK | SR_FD, r7
+ or r6, r7, r6
+ putcon r6, SR
+ movi SR_UNBLOCK_EXC, r7
+ and r6, r7, r6
+ putcon r6, SR
+
+
+ /* Now call the appropriate 3rd level handler */
+ or r3, ZERO, LINK
+ movi trap_jtable, r3
+ shlri r2, 3, r2
+ ldx.l r2, r3, r3
+ shlri r2, 2, r2
+ ptabs r3, tr0
+ or SP, ZERO, r3
+ blink tr0, ZERO
+
+/*
+ * Second level handler for VBR-based exceptions. Post-handlers.
+ *
+ * Post-handlers for interrupts (ret_from_irq), exceptions
+ * (ret_from_exception) and common reentrance doors (restore_all
+ * to get back to the original context, ret_from_syscall loop to
+ * check kernel exiting).
+ *
+ * ret_with_reschedule and work_notifysig are inner labels of
+ * the ret_from_syscall loop.
+ *
+ * In common to all stack-frame sensitive handlers.
+ *
+ * Inputs:
+ * (SP)	struct pt_regs *, original registers' frame pointer (basic)
+ *
+ */
+ .global ret_from_irq
+ret_from_irq:
+#ifdef CONFIG_POOR_MANS_STRACE
+ pta evt_debug_ret_from_irq, tr0
+ ori SP, 0, r2
+ blink tr0, LINK
+#endif
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+ STI()
+ pta ret_with_reschedule, tr0
+ blink tr0, ZERO /* Do not check softirqs */
+
+ .global ret_from_exception
+ret_from_exception:
+ preempt_stop()
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ pta evt_debug_ret_from_exc, tr0
+ ori SP, 0, r2
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+
+ /* Check softirqs */
+
+#ifdef CONFIG_PREEMPT
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+resume_kernel:
+ pta restore_all, tr0
+
+ getcon KCR0, r6
+ ld.l r6, TI_PRE_COUNT, r7
+ beq/u r7, ZERO, tr0
+
+need_resched:
+ ld.l r6, TI_FLAGS, r7
+ movi (1 << TIF_NEED_RESCHED), r8
+ and r8, r7, r8
+ bne r8, ZERO, tr0
+
+ getcon SR, r7
+ andi r7, 0xf0, r7
+ bne r7, ZERO, tr0
+
+ movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
+ shori (PREEMPT_ACTIVE & 65535), r8
+ st.l r6, TI_PRE_COUNT, r8
+
+ STI()
+ movi schedule, r7
+ ori r7, 1, r7
+ ptabs r7, tr1
+ blink tr1, LINK
+
+ st.l r6, TI_PRE_COUNT, ZERO
+ CLI()
+
+ pta need_resched, tr1
+ blink tr1, ZERO
+#endif
+
+ .global ret_from_syscall
+ret_from_syscall:
+
+ret_with_reschedule:
+ getcon KCR0, r6 ! r6 contains current_thread_info
+ ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
+
+ movi _TIF_NEED_RESCHED, r8
+ and r8, r7, r8
+ pta work_resched, tr0
+ bne r8, ZERO, tr0
+
+ pta restore_all, tr1
+
+ movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+ and r8, r7, r8
+ pta work_notifysig, tr0
+ bne r8, ZERO, tr0
+
+ blink tr1, ZERO
+
+work_resched:
+ pta ret_from_syscall, tr0
+ gettr tr0, LINK
+ movi schedule, r6
+ ptabs r6, tr0
+ blink tr0, ZERO /* Call schedule(), return on top */
+
+work_notifysig:
+ gettr tr1, LINK
+
+ movi do_signal, r6
+ ptabs r6, tr0
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3
+ blink tr0, LINK /* Call do_signal(regs, 0), return here */
+
+restore_all:
+ /* Do prefetches */
+
+ ld.q SP, FRAME_T(0), r6
+ ld.q SP, FRAME_T(1), r7
+ ld.q SP, FRAME_T(2), r8
+ ld.q SP, FRAME_T(3), r9
+ ptabs r6, tr0
+ ptabs r7, tr1
+ ptabs r8, tr2
+ ptabs r9, tr3
+ ld.q SP, FRAME_T(4), r6
+ ld.q SP, FRAME_T(5), r7
+ ld.q SP, FRAME_T(6), r8
+ ld.q SP, FRAME_T(7), r9
+ ptabs r6, tr4
+ ptabs r7, tr5
+ ptabs r8, tr6
+ ptabs r9, tr7
+
+ ld.q SP, FRAME_R(0), r0
+ ld.q SP, FRAME_R(1), r1
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+ ld.q SP, FRAME_R(8), r8
+ ld.q SP, FRAME_R(9), r9
+ ld.q SP, FRAME_R(10), r10
+ ld.q SP, FRAME_R(11), r11
+ ld.q SP, FRAME_R(12), r12
+ ld.q SP, FRAME_R(13), r13
+ ld.q SP, FRAME_R(14), r14
+
+ ld.q SP, FRAME_R(16), r16
+ ld.q SP, FRAME_R(17), r17
+ ld.q SP, FRAME_R(18), r18
+ ld.q SP, FRAME_R(19), r19
+ ld.q SP, FRAME_R(20), r20
+ ld.q SP, FRAME_R(21), r21
+ ld.q SP, FRAME_R(22), r22
+ ld.q SP, FRAME_R(23), r23
+ ld.q SP, FRAME_R(24), r24
+ ld.q SP, FRAME_R(25), r25
+ ld.q SP, FRAME_R(26), r26
+ ld.q SP, FRAME_R(27), r27
+ ld.q SP, FRAME_R(28), r28
+ ld.q SP, FRAME_R(29), r29
+ ld.q SP, FRAME_R(30), r30
+ ld.q SP, FRAME_R(31), r31
+ ld.q SP, FRAME_R(32), r32
+ ld.q SP, FRAME_R(33), r33
+ ld.q SP, FRAME_R(34), r34
+ ld.q SP, FRAME_R(35), r35
+ ld.q SP, FRAME_R(36), r36
+ ld.q SP, FRAME_R(37), r37
+ ld.q SP, FRAME_R(38), r38
+ ld.q SP, FRAME_R(39), r39
+ ld.q SP, FRAME_R(40), r40
+ ld.q SP, FRAME_R(41), r41
+ ld.q SP, FRAME_R(42), r42
+ ld.q SP, FRAME_R(43), r43
+ ld.q SP, FRAME_R(44), r44
+ ld.q SP, FRAME_R(45), r45
+ ld.q SP, FRAME_R(46), r46
+ ld.q SP, FRAME_R(47), r47
+ ld.q SP, FRAME_R(48), r48
+ ld.q SP, FRAME_R(49), r49
+ ld.q SP, FRAME_R(50), r50
+ ld.q SP, FRAME_R(51), r51
+ ld.q SP, FRAME_R(52), r52
+ ld.q SP, FRAME_R(53), r53
+ ld.q SP, FRAME_R(54), r54
+ ld.q SP, FRAME_R(55), r55
+ ld.q SP, FRAME_R(56), r56
+ ld.q SP, FRAME_R(57), r57
+ ld.q SP, FRAME_R(58), r58
+
+ getcon SR, r59
+ movi SR_BLOCK_EXC, r60
+ or r59, r60, r59
+ putcon r59, SR /* SR.BL = 1, keep nesting out */
+ ld.q SP, FRAME_S(FSSR), r61
+ ld.q SP, FRAME_S(FSPC), r62
+ movi SR_ASID_MASK, r60
+ and r59, r60, r59
+ andc r61, r60, r61 /* Clear out older ASID */
+ or r59, r61, r61 /* Retain current ASID */
+ putcon r61, SSR
+ putcon r62, SPC
+
+ /* Ignore FSYSCALL_ID */
+
+ ld.q SP, FRAME_R(59), r59
+ ld.q SP, FRAME_R(60), r60
+ ld.q SP, FRAME_R(61), r61
+ ld.q SP, FRAME_R(62), r62
+
+ /* Last touch */
+ ld.q SP, FRAME_R(15), SP
+ rte
+ nop
+
+/*
+ * Third level handlers for VBR-based exceptions. Adapting args to
+ * and/or deflecting to fourth level handlers.
+ *
+ * Fourth level handlers interface.
+ * Most are C-coded handlers directly pointed by the trap_jtable.
+ * (Third = Fourth level)
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
+ * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
+ * (r3)	struct pt_regs *, original registers' frame pointer
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
+ * (r5) TRA control register (for syscall/debug benefit only)
+ * (LINK) return address
+ * (SP) = r3
+ *
+ * Kernel TLB fault handlers will get a slightly different interface.
+ * (r2)	struct pt_regs *, original registers' frame pointer
+ * (r3) writeaccess, whether it's a store fault as opposed to load fault
+ * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
+ * (r5) Effective Address of fault
+ * (LINK) return address
+ * (SP) = r2
+ *
+ * fpu_error_or_IRQA/B are helpers to deflect to the right cause.
+ *
+ */
+tlb_miss_load:
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+tlb_miss_store:
+ or SP, ZERO, r2
+ movi 1, r3 /* Write */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+itlb_miss_or_IRQ:
+ pta its_IRQ, tr0
+ beqi/u r4, EVENT_INTERRUPT, tr0
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ movi 1, r4 /* Text */
+ getcon TEA, r5
+ /* Fall through */
+
+call_do_page_fault:
+ movi do_page_fault, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+fpu_error_or_IRQA:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi do_fpu_state_restore, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+fpu_error_or_IRQB:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi do_fpu_state_restore, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+its_IRQ:
+ movi do_IRQ, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+/*
+ * system_call/unknown_trap third level handler:
+ *
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (TRAP = 11)
+ * (r3)	struct pt_regs *, original registers' frame pointer
+ * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
+ * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
+ * (SP) = r3
+ * (LINK) return address: ret_from_exception
+ * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
+ *
+ * Outputs:
+ * (*r3) Syscall reply (Saved r2)
+ * (LINK)	For syscalls only; it can be scrapped.
+ * Common second level post handler will be ret_from_syscall.
+ * Common (non-trace) exit point to that is syscall_ret (saving
+ * result to r2). Common bad exit point is syscall_bad (returning
+ * ENOSYS then saved to r2).
+ *
+ */
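+
+/* In C terms, the TRA test in system_call below (given the 0x00xyzzzz
+ * format described above) is simply:
+ *
+ *	if ((tra >> 20) != 1)
+ *		goto unknown_trap;	(not a system call)
+ *	nr = tra & 0x1ff;		(syscall number)
+ */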
+
+unknown_trap:
+ /* Unknown Trap or User Trace */
+ movi do_unknown_trapa, r6
+ ptabs r6, tr0
+ ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
+ andi r2, 0x1ff, r2 /* r2 = syscall # */
+ blink tr0, LINK
+
+ pta syscall_ret, tr0
+ blink tr0, ZERO
+
+	/* New syscall implementation */
+system_call:
+ pta unknown_trap, tr0
+ or r5, ZERO, r4 /* TRA (=r5) -> r4 */
+ shlri r4, 20, r4
+ bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
+
+ /* It's a system call */
+ st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
+ andi r5, 0x1ff, r5 /* syscall # -> r5 */
+
+ STI()
+
+ pta syscall_allowed, tr0
+ movi NR_syscalls - 1, r4 /* Last valid */
+ bgeu/l r4, r5, tr0
+
+syscall_bad:
+ /* Return ENOSYS ! */
+ movi -(ENOSYS), r2 /* Fall-through */
+
+ .global syscall_ret
+syscall_ret:
+ st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* nothing useful in registers at this point */
+
+ movi evt_debug2, r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ ld.q SP, FRAME_R(9), r2
+ or SP, ZERO, r3
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+
+/* A different return path for ret_from_fork, because with later kernels we
+ * now need to call schedule_tail. Since prev is loaded into r2 by
+ * switch_to(), we can call it straight away.
+ */
+
+.global ret_from_fork
+ret_from_fork:
+
+ movi schedule_tail,r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ blink tr0, LINK
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* nothing useful in registers at this point */
+
+ movi evt_debug2, r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ ld.q SP, FRAME_R(9), r2
+ or SP, ZERO, r3
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+
+
+syscall_allowed:
+ /* Use LINK to deflect the exit point, default is syscall_ret */
+ pta syscall_ret, tr0
+ gettr tr0, LINK
+ pta syscall_notrace, tr0
+
+ getcon KCR0, r2
+ ld.l r2, TI_FLAGS, r4
+ movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
+ and r6, r4, r6
+ beq/l r6, ZERO, tr0
+
+ /* Trace it by calling syscall_trace before and after */
+ movi syscall_trace, r4
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3
+ ptabs r4, tr0
+ blink tr0, LINK
+
+ /* Reload syscall number as r5 is trashed by syscall_trace */
+ ld.q SP, FRAME_S(FSYSCALL_ID), r5
+ andi r5, 0x1ff, r5
+
+ pta syscall_ret_trace, tr0
+ gettr tr0, LINK
+
+syscall_notrace:
+ /* Now point to the appropriate 4th level syscall handler */
+ movi sys_call_table, r4
+ shlli r5, 2, r5
+ ldx.l r4, r5, r5
+ ptabs r5, tr0
+
+ /* Prepare original args */
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+
+ /* And now the trick for those syscalls requiring regs * ! */
+ or SP, ZERO, r8
+
+ /* Call it */
+ blink tr0, ZERO /* LINK is already properly set */
+
+syscall_ret_trace:
+ /* We get back here only if under trace */
+ st.q SP, FRAME_R(9), r2 /* Save return value */
+
+ movi syscall_trace, LINK
+ or SP, ZERO, r2
+ movi 1, r3
+ ptabs LINK, tr0
+ blink tr0, LINK
+
+ /* This needs to be done after any syscall tracing */
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO /* Resume normal return sequence */
+
+/*
+ * --- Switch to running under a particular ASID and return the previous ASID value
+ * --- The caller is assumed to have done a cli before calling this.
+ *
+ * Input r2 : new ASID
+ * Output r2 : old ASID
+ */
+
+ .global switch_and_save_asid
+switch_and_save_asid:
+ getcon sr, r0
+ movi 255, r4
+ shlli r4, 16, r4 /* r4 = mask to select ASID */
+ and r0, r4, r3 /* r3 = shifted old ASID */
+ andi r2, 255, r2 /* mask down new ASID */
+ shlli r2, 16, r2 /* align new ASID against SR.ASID */
+ andc r0, r4, r0 /* efface old ASID from SR */
+ or r0, r2, r0 /* insert the new ASID */
+ putcon r0, ssr
+ movi 1f, r0
+ putcon r0, spc
+ rte
+ nop
+1:
+ ptabs LINK, tr0
+ shlri r3, 16, r2 /* r2 = old ASID */
+ blink tr0, r63
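+
+/* In C terms, switch_and_save_asid() above does (illustratively):
+ *
+ *	old_asid = (sr & SR_ASID_MASK) >> 16;
+ *	sr = (sr & ~SR_ASID_MASK) | ((new_asid & 0xff) << 16);
+ *
+ * with the new SR installed via SSR/SPC and an rte rather than by
+ * writing SR directly, so the switch takes effect cleanly.
+ */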
+
+ .global route_to_panic_handler
+route_to_panic_handler:
+ /* Switch to real mode, goto panic_handler, don't return. Useful for
+ last-chance debugging, e.g. if no output wants to go to the console.
+ */
+
+ movi panic_handler - CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ pta 1f, tr1
+ gettr tr1, r0
+ putcon r0, spc
+ getcon sr, r0
+ movi 1, r1
+ shlli r1, 31, r1
+ andc r0, r1, r0
+ putcon r0, ssr
+ rte
+ nop
+1: /* Now in real mode */
+ blink tr0, r63
+ nop
+
+ .global peek_real_address_q
+peek_real_address_q:
+ /* Two args:
+ r2 : real mode address to peek
+ r2(out) : result quadword
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to onchip_remap the debug
+ module, and to avoid the need to onchip_remap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+	or	r0, r1, r1	/* r1 = old SR with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.peek0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+ /* Here's the actual peek. If the address is bad, all bets are now off
+ * what will happen (handlers invoked in real-mode = bad news) */
+ ld.q r2, 0, r2
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
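+
+	/* Typical use, as in the SH-5 probe code added later in this
+	 * series:
+	 *
+	 *	cir = peek_real_address_q(0x0d000008);
+	 *
+	 * i.e. read a device register by physical address without first
+	 * setting up a mapping for it.
+	 */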
+
+ .global poke_real_address_q
+poke_real_address_q:
+ /* Two args:
+ r2 : real mode address to poke
+ r3 : quadword value to write.
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to onchip_remap the debug
+ module, and to avoid the need to onchip_remap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+	or	r0, r1, r1	/* r1 = old SR with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.poke0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+ /* Here's the actual poke. If the address is bad, all bets are now off
+ * what will happen (handlers invoked in real-mode = bad news) */
+ st.q r2, 0, r3
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
+
+/*
+ * --- User Access Handling Section
+ */
+
+/*
+ * User access support. It has all moved to non-inlined assembler
+ * functions in here.
+ *
+ * __kernel_size_t __copy_user(void *__to, const void *__from,
+ * __kernel_size_t __n)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) size in bytes
+ *
+ * Outputs:
+ * (*r2) target data
+ * (r2) non-copied bytes
+ *
+ * If a fault occurs on the user pointer, bail out early and return the
+ * number of bytes not copied in r2.
+ * Strategy : for large blocks, call a real memcpy function which can
+ * move >1 byte at a time using unaligned ld/st instructions, and can
+ * manipulate the cache using prefetch + alloco to improve the speed
+ * further. If a fault occurs in that function, just revert to the
+ * byte-by-byte approach used for small blocks; this is rare so the
+ * performance hit for that case does not matter.
+ *
+ * For small blocks it's not worth the overhead of setting up and calling
+ * the memcpy routine; do the copy a byte at a time.
+ *
+ */
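+
+/* A C sketch of that strategy (illustrative only -- the real entry points
+ * below are assembler so the __ex_table entries can name the exact
+ * faulting instructions; copy_one_byte() is a hypothetical helper
+ * returning nonzero on fault):
+ *
+ *	size_t __copy_user(char *to, const char *from, size_t n)
+ *	{
+ *		if (n > 16 && copy_user_memcpy(to, from, n) == 0)
+ *			return 0;	-- fast path copied the lot
+ *		while (n > 0 && copy_one_byte(to++, from++) == 0)
+ *			n--;
+ *		return n;		-- bytes NOT copied
+ *	}
+ */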
+ .global __copy_user
+__copy_user:
+ pta __copy_user_byte_by_byte, tr1
+ movi 16, r0 ! this value is a best guess, should tune it by benchmarking
+ bge/u r0, r4, tr1
+ pta copy_user_memcpy, tr0
+ addi SP, -32, SP
+ /* Save arguments in case we have to fix-up unhandled page fault */
+ st.q SP, 0, r2
+ st.q SP, 8, r3
+ st.q SP, 16, r4
+ st.q SP, 24, r35 ! r35 is callee-save
+ /* Save LINK in a register to reduce RTS time later (otherwise
+ ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
+ ori LINK, 0, r35
+ blink tr0, LINK
+
+ /* Copy completed normally if we get back here */
+ ptabs r35, tr0
+ ld.q SP, 24, r35
+ /* don't restore r2-r4, pointless */
+ /* set result=r2 to zero as the copy must have succeeded. */
+ or r63, r63, r2
+ addi SP, 32, SP
+ blink tr0, r63 ! RTS
+
+ .global __copy_user_fixup
+__copy_user_fixup:
+ /* Restore stack frame */
+ ori r35, 0, LINK
+ ld.q SP, 24, r35
+ ld.q SP, 16, r4
+ ld.q SP, 8, r3
+ ld.q SP, 0, r2
+ addi SP, 32, SP
+ /* Fall through to original code, in the 'same' state we entered with */
+
+/* The slow byte-by-byte method is used if the fast copy traps due to a bad
+ user address. In that rare case, the speed drop can be tolerated. */
+__copy_user_byte_by_byte:
+ pta ___copy_user_exit, tr1
+ pta ___copy_user1, tr0
+ beq/u r4, r63, tr1 /* early exit for zero length copy */
+ sub r2, r3, r0
+ addi r0, -1, r0
+
+___copy_user1:
+ ld.b r3, 0, r5 /* Fault address 1 */
+
+ /* Could rewrite this to use just 1 add, but the second comes 'free'
+ due to load latency */
+ addi r3, 1, r3
+ addi r4, -1, r4 /* No real fixup required */
+___copy_user2:
+ stx.b r3, r0, r5 /* Fault address 2 */
+ bne r4, ZERO, tr0
+
+___copy_user_exit:
+ or r4, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) size in bytes
+ *
+ * Outputs:
+ * (*r2) zero-ed target data
+ * (r2) non-zero-ed bytes
+ */
+ .global __clear_user
+__clear_user:
+ pta ___clear_user_exit, tr1
+ pta ___clear_user1, tr0
+ beq/u r3, r63, tr1
+
+___clear_user1:
+ st.b r2, 0, ZERO /* Fault address */
+ addi r2, 1, r2
+ addi r3, -1, r3 /* No real fixup required */
+ bne r3, ZERO, tr0
+
+___clear_user_exit:
+ or r3, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+/*
+ * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
+ * int __count)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) maximum size in bytes
+ *
+ * Outputs:
+ * (*r2) copied data
+ * (r2) -EFAULT (in case of faulting)
+ * copied data (otherwise)
+ */
+ .global __strncpy_from_user
+__strncpy_from_user:
+ pta ___strncpy_from_user1, tr0
+ pta ___strncpy_from_user_done, tr1
+ or r4, ZERO, r5 /* r5 = original count */
+ beq/u r4, r63, tr1 /* early exit if r4==0 */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+
+___strncpy_from_user1:
+ ld.b r3, 0, r7 /* Fault address: only in reading */
+ st.b r2, 0, r7
+ addi r2, 1, r2
+ addi r3, 1, r3
+ beq/u ZERO, r7, tr1
+ addi r4, -1, r4 /* return real number of copied bytes */
+ bne/l ZERO, r4, tr0
+
+___strncpy_from_user_done:
+ sub r5, r4, r6 /* If done, return copied */
+
+___strncpy_from_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __strnlen_user(const char *__s, long __n)
+ *
+ * Inputs:
+ * (r2) source address
+ * (r3) source size in bytes
+ *
+ * Outputs:
+ * (r2) -EFAULT (in case of faulting)
+ * string length (otherwise)
+ */
+ .global __strnlen_user
+__strnlen_user:
+ pta ___strnlen_user_set_reply, tr0
+ pta ___strnlen_user1, tr1
+ or ZERO, ZERO, r5 /* r5 = counter */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+ beq r3, ZERO, tr0
+
+___strnlen_user1:
+ ldx.b r2, r5, r7 /* Fault address: only in reading */
+ addi r3, -1, r3 /* No real fixup */
+ addi r5, 1, r5
+ beq r3, ZERO, tr0
+ bne r7, ZERO, tr1
+! The line below used to be active. It led to a junk byte lying between each pair
+! of entries in the argv & envp structures in memory. Whilst the program saw the right data
+! via the argv and envp arguments to main, it meant the 'flat' representation visible through
+! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
+! addi r5, 1, r5 /* Include '\0' */
+
+___strnlen_user_set_reply:
+ or r5, ZERO, r6 /* If done, return counter */
+
+___strnlen_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __get_user_asm_?(void *val, long addr)
+ *
+ * Inputs:
+ * (r2) dest address
+ * (r3) source address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __get_user_asm_b
+__get_user_asm_b:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_b1:
+ ld.b r3, 0, r5 /* r5 = data */
+ st.b r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_w
+__get_user_asm_w:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_w1:
+ ld.w r3, 0, r5 /* r5 = data */
+ st.w r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_l
+__get_user_asm_l:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_l1:
+ ld.l r3, 0, r5 /* r5 = data */
+ st.l r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_q
+__get_user_asm_q:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_q1:
+ ld.q r3, 0, r5 /* r5 = data */
+ st.q r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __put_user_asm_?(void *pval, long addr)
+ *
+ * Inputs:
+ * (r2) kernel pointer to value
+ * (r3) dest address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __put_user_asm_b
+__put_user_asm_b:
+ ld.b r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_b1:
+ st.b r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_w
+__put_user_asm_w:
+ ld.w r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_w1:
+ st.w r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_l
+__put_user_asm_l:
+ ld.l r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_l1:
+ st.l r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_q
+__put_user_asm_q:
+ ld.q r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_q1:
+ st.q r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+panic_stash_regs:
+ /* The idea is : when we get an unhandled panic, we dump the registers
+	   to a known memory location, then just sit in a tight loop.
+ This allows the human to look at the memory region through the GDB
+ session (assuming the debug module's SHwy initiator isn't locked up
+ or anything), to hopefully analyze the cause of the panic. */
+
+ /* On entry, former r15 (SP) is in DCR
+ former r0 is at resvec_saved_area + 0
+ former r1 is at resvec_saved_area + 8
+ former tr0 is at resvec_saved_area + 32
+ DCR is the only register whose value is lost altogether.
+ */
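+
+	/* For reference, the area written below could be read back as if
+	 * it were the following (hypothetical, debug-only) layout at
+	 * physical address 0x80000000:
+	 *
+	 *	struct panic_dump {
+	 *		u64 regs[64];	-- 0x000-0x1f8; the r15 slot is
+	 *				   filled from DCR, i.e. the former SP
+	 *		u64 tregs[8];	-- 0x200-0x238, tr0-tr7
+	 *		u64 sr, ssr, pssr, spc, pspc, intevt, expevt, pexpevt;
+	 *		u64 tra, tea, kcr0, kcr1, vbr, resvec;	-- 0x240-0x2a8
+	 *	};
+	 */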
+
+ movi 0xffffffff80000000, r0 ! phy of dump area
+ ld.q SP, 0x000, r1 ! former r0
+ st.q r0, 0x000, r1
+ ld.q SP, 0x008, r1 ! former r1
+ st.q r0, 0x008, r1
+ st.q r0, 0x010, r2
+ st.q r0, 0x018, r3
+ st.q r0, 0x020, r4
+ st.q r0, 0x028, r5
+ st.q r0, 0x030, r6
+ st.q r0, 0x038, r7
+ st.q r0, 0x040, r8
+ st.q r0, 0x048, r9
+ st.q r0, 0x050, r10
+ st.q r0, 0x058, r11
+ st.q r0, 0x060, r12
+ st.q r0, 0x068, r13
+ st.q r0, 0x070, r14
+ getcon dcr, r14
+ st.q r0, 0x078, r14
+ st.q r0, 0x080, r16
+ st.q r0, 0x088, r17
+ st.q r0, 0x090, r18
+ st.q r0, 0x098, r19
+ st.q r0, 0x0a0, r20
+ st.q r0, 0x0a8, r21
+ st.q r0, 0x0b0, r22
+ st.q r0, 0x0b8, r23
+ st.q r0, 0x0c0, r24
+ st.q r0, 0x0c8, r25
+ st.q r0, 0x0d0, r26
+ st.q r0, 0x0d8, r27
+ st.q r0, 0x0e0, r28
+ st.q r0, 0x0e8, r29
+ st.q r0, 0x0f0, r30
+ st.q r0, 0x0f8, r31
+ st.q r0, 0x100, r32
+ st.q r0, 0x108, r33
+ st.q r0, 0x110, r34
+ st.q r0, 0x118, r35
+ st.q r0, 0x120, r36
+ st.q r0, 0x128, r37
+ st.q r0, 0x130, r38
+ st.q r0, 0x138, r39
+ st.q r0, 0x140, r40
+ st.q r0, 0x148, r41
+ st.q r0, 0x150, r42
+ st.q r0, 0x158, r43
+ st.q r0, 0x160, r44
+ st.q r0, 0x168, r45
+ st.q r0, 0x170, r46
+ st.q r0, 0x178, r47
+ st.q r0, 0x180, r48
+ st.q r0, 0x188, r49
+ st.q r0, 0x190, r50
+ st.q r0, 0x198, r51
+ st.q r0, 0x1a0, r52
+ st.q r0, 0x1a8, r53
+ st.q r0, 0x1b0, r54
+ st.q r0, 0x1b8, r55
+ st.q r0, 0x1c0, r56
+ st.q r0, 0x1c8, r57
+ st.q r0, 0x1d0, r58
+ st.q r0, 0x1d8, r59
+ st.q r0, 0x1e0, r60
+ st.q r0, 0x1e8, r61
+ st.q r0, 0x1f0, r62
+ st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
+
+ ld.q SP, 0x020, r1 ! former tr0
+ st.q r0, 0x200, r1
+ gettr tr1, r1
+ st.q r0, 0x208, r1
+ gettr tr2, r1
+ st.q r0, 0x210, r1
+ gettr tr3, r1
+ st.q r0, 0x218, r1
+ gettr tr4, r1
+ st.q r0, 0x220, r1
+ gettr tr5, r1
+ st.q r0, 0x228, r1
+ gettr tr6, r1
+ st.q r0, 0x230, r1
+ gettr tr7, r1
+ st.q r0, 0x238, r1
+
+ getcon sr, r1
+ getcon ssr, r2
+ getcon pssr, r3
+ getcon spc, r4
+ getcon pspc, r5
+ getcon intevt, r6
+ getcon expevt, r7
+ getcon pexpevt, r8
+ getcon tra, r9
+ getcon tea, r10
+ getcon kcr0, r11
+ getcon kcr1, r12
+ getcon vbr, r13
+ getcon resvec, r14
+
+ st.q r0, 0x240, r1
+ st.q r0, 0x248, r2
+ st.q r0, 0x250, r3
+ st.q r0, 0x258, r4
+ st.q r0, 0x260, r5
+ st.q r0, 0x268, r6
+ st.q r0, 0x270, r7
+ st.q r0, 0x278, r8
+ st.q r0, 0x280, r9
+ st.q r0, 0x288, r10
+ st.q r0, 0x290, r11
+ st.q r0, 0x298, r12
+ st.q r0, 0x2a0, r13
+ st.q r0, 0x2a8, r14
+
+ getcon SPC,r2
+ getcon SSR,r3
+ getcon EXPEVT,r4
+ /* Prepare to jump to C - physical address */
+ movi panic_handler-CONFIG_PAGE_OFFSET, r1
+ ori r1, 1, r1
+ ptabs r1, tr0
+ getcon DCR, SP
+ blink tr0, ZERO
+ nop
+ nop
+ nop
+ nop
+
+
+
+
+/*
+ * --- Signal Handling Section
+ */
+
+/*
+ * extern long long _sa_default_rt_restorer
+ * extern long long _sa_default_restorer
+ *
+ * or, better,
+ *
+ * extern void _sa_default_rt_restorer(void)
+ * extern void _sa_default_restorer(void)
+ *
+ * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
+ * from user space. Copied into user space by signal management.
+ * Both must be quad aligned and 2 quad long (4 instructions).
+ *
+ */
+ .balign 8
+ .global sa_default_rt_restorer
+sa_default_rt_restorer:
+ movi 0x10, r9
+ shori __NR_rt_sigreturn, r9
+ trapa r9
+ nop
+
+ .balign 8
+ .global sa_default_restorer
+sa_default_restorer:
+ movi 0x10, r9
+ shori __NR_sigreturn, r9
+ trapa r9
+ nop
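+
+/* Note on the encoding: movi loads 0x10 and shori then shifts the
+ * register left by 16 and ORs in its 16-bit immediate, so r9 ends up as
+ * (0x10 << 16) | __NR_sigreturn -- the syscall number in the low half,
+ * with the 0x10 tag the syscall trap entry expects in the upper half
+ * (the rt variant above is built the same way).
+ */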
+
+/*
+ * --- __ex_table Section
+ */
+
+/*
+ * User Access Exception Table.
+ */
+ .section __ex_table, "a"
+
+ .global asm_uaccess_start /* Just a marker */
+asm_uaccess_start:
+
+ .long ___copy_user1, ___copy_user_exit
+ .long ___copy_user2, ___copy_user_exit
+ .long ___clear_user1, ___clear_user_exit
+ .long ___strncpy_from_user1, ___strncpy_from_user_exit
+ .long ___strnlen_user1, ___strnlen_user_exit
+ .long ___get_user_asm_b1, ___get_user_asm_b_exit
+ .long ___get_user_asm_w1, ___get_user_asm_w_exit
+ .long ___get_user_asm_l1, ___get_user_asm_l_exit
+ .long ___get_user_asm_q1, ___get_user_asm_q_exit
+ .long ___put_user_asm_b1, ___put_user_asm_b_exit
+ .long ___put_user_asm_w1, ___put_user_asm_w_exit
+ .long ___put_user_asm_l1, ___put_user_asm_l_exit
+ .long ___put_user_asm_q1, ___put_user_asm_q_exit
+
+ .global asm_uaccess_end /* Just a marker */
+asm_uaccess_end:
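+
+/*
+ * Each pair above maps a potentially-faulting instruction address to the
+ * address at which execution should resume if it does fault. Roughly what
+ * the page-fault path does with it (simplified sketch; the real lookup is
+ * the generic search_exception_tables()):
+ *
+ *	entry = search_exception_tables(regs->pc);
+ *	if (entry)
+ *		regs->pc = entry->fixup;  -- resume at the ___xxx_exit label
+ *
+ * which is why each routine above pre-loads its "error" return value
+ * before touching user memory.
+ */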
+
+
+
+
+/*
+ * --- .text.init Section
+ */
+
+ .section .text.init, "ax"
+
+/*
+ * void trap_init (void)
+ *
+ */
+ .global trap_init
+trap_init:
+ addi SP, -24, SP /* Room to save r28/r29/r30 */
+ st.q SP, 0, r28
+ st.q SP, 8, r29
+ st.q SP, 16, r30
+
+ /* Set VBR and RESVEC */
+ movi LVBR_block, r19
+ andi r19, -4, r19 /* reset MMUOFF + reserved */
+ /* For RESVEC exceptions we force the MMU off, which means we need the
+ physical address. */
+ movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
+ andi r20, -4, r20 /* reset reserved */
+ ori r20, 1, r20 /* set MMUOFF */
+ putcon r19, VBR
+ putcon r20, RESVEC
+
+ /* Sanity check */
+ movi LVBR_block_end, r21
+ andi r21, -4, r21
+ movi BLOCK_SIZE, r29 /* r29 = expected size */
+ or r19, ZERO, r30
+ add r19, r29, r19
+
+ /*
+ * Ugly, but better loop forever now than crash afterwards.
+ * We should print a message, but if we touch LVBR or
+ * LRESVEC blocks we should not be surprised if we get stuck
+ * in trap_init().
+ */
+ pta trap_init_loop, tr1
+ gettr tr1, r28 /* r28 = trap_init_loop */
+ sub r21, r30, r30 /* r30 = actual size */
+
+ /*
+ * VBR/RESVEC handlers overlap by being bigger than
+ * allowed. Very bad. Just loop forever.
+ * (r28) panic/loop address
+ * (r29) expected size
+ * (r30) actual size
+ */
+trap_init_loop:
+ bne r19, r21, tr1
+
+ /* Now that exception vectors are set up reset SR.BL */
+ getcon SR, r22
+ movi SR_UNBLOCK_EXC, r23
+ and r22, r23, r22
+ putcon r22, SR
+
+ addi SP, 24, SP
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
new file mode 100644
index 00000000000..30b76a94abf
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -0,0 +1,166 @@
+/*
+ * arch/sh/kernel/cpu/sh5/fpu.c
+ *
+ * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
+ * Copyright (C) 2002 STMicroelectronics Limited
+ * Author : Stuart Menefy
+ *
+ * Started from SH4 version:
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/user.h>
+#include <asm/io.h>
+
+/*
+ * Initially load the FPU with signalling NaNs. This bit pattern
+ * has the property that, whether considered as single or as double
+ * precision, it still represents a signalling NaN.
+ */
+#define sNAN64 0xFFFFFFFFFFFFFFFFULL
+#define sNAN32 0xFFFFFFFFUL
+
+static union sh_fpu_union init_fpuregs = {
+ .hard = {
+ .fp_regs = { [0 ... 63] = sNAN32 },
+ .fpscr = FPSCR_INIT
+ }
+};
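+
+/*
+ * One pattern works for both widths because an IEEE value is a NaN
+ * whenever its exponent field is all ones and its fraction is non-zero,
+ * which an all-ones word satisfies in either interpretation. A quick
+ * userspace check (illustrative only, not part of the kernel build):
+ *
+ *	float f; double d;
+ *	u32 w = sNAN32; u64 q = sNAN64;
+ *	memcpy(&f, &w, 4); memcpy(&d, &q, 8);
+ *	assert(f != f && d != d);  -- NaN is the only value unequal to itself
+ */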
+
+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+{
+ asm volatile("fst.p %0, (0*8), fp0\n\t"
+ "fst.p %0, (1*8), fp2\n\t"
+ "fst.p %0, (2*8), fp4\n\t"
+ "fst.p %0, (3*8), fp6\n\t"
+ "fst.p %0, (4*8), fp8\n\t"
+ "fst.p %0, (5*8), fp10\n\t"
+ "fst.p %0, (6*8), fp12\n\t"
+ "fst.p %0, (7*8), fp14\n\t"
+ "fst.p %0, (8*8), fp16\n\t"
+ "fst.p %0, (9*8), fp18\n\t"
+ "fst.p %0, (10*8), fp20\n\t"
+ "fst.p %0, (11*8), fp22\n\t"
+ "fst.p %0, (12*8), fp24\n\t"
+ "fst.p %0, (13*8), fp26\n\t"
+ "fst.p %0, (14*8), fp28\n\t"
+ "fst.p %0, (15*8), fp30\n\t"
+ "fst.p %0, (16*8), fp32\n\t"
+ "fst.p %0, (17*8), fp34\n\t"
+ "fst.p %0, (18*8), fp36\n\t"
+ "fst.p %0, (19*8), fp38\n\t"
+ "fst.p %0, (20*8), fp40\n\t"
+ "fst.p %0, (21*8), fp42\n\t"
+ "fst.p %0, (22*8), fp44\n\t"
+ "fst.p %0, (23*8), fp46\n\t"
+ "fst.p %0, (24*8), fp48\n\t"
+ "fst.p %0, (25*8), fp50\n\t"
+ "fst.p %0, (26*8), fp52\n\t"
+ "fst.p %0, (27*8), fp54\n\t"
+ "fst.p %0, (28*8), fp56\n\t"
+ "fst.p %0, (29*8), fp58\n\t"
+ "fst.p %0, (30*8), fp60\n\t"
+ "fst.p %0, (31*8), fp62\n\t"
+
+ "fgetscr fr63\n\t"
+ "fst.s %0, (32*8), fr63\n\t"
+ : /* no output */
+ : "r" (&tsk->thread.fpu.hard)
+ : "memory");
+}
+
+static inline void
+fpload(struct sh_fpu_hard_struct *fpregs)
+{
+ asm volatile("fld.p %0, (0*8), fp0\n\t"
+ "fld.p %0, (1*8), fp2\n\t"
+ "fld.p %0, (2*8), fp4\n\t"
+ "fld.p %0, (3*8), fp6\n\t"
+ "fld.p %0, (4*8), fp8\n\t"
+ "fld.p %0, (5*8), fp10\n\t"
+ "fld.p %0, (6*8), fp12\n\t"
+ "fld.p %0, (7*8), fp14\n\t"
+ "fld.p %0, (8*8), fp16\n\t"
+ "fld.p %0, (9*8), fp18\n\t"
+ "fld.p %0, (10*8), fp20\n\t"
+ "fld.p %0, (11*8), fp22\n\t"
+ "fld.p %0, (12*8), fp24\n\t"
+ "fld.p %0, (13*8), fp26\n\t"
+ "fld.p %0, (14*8), fp28\n\t"
+ "fld.p %0, (15*8), fp30\n\t"
+ "fld.p %0, (16*8), fp32\n\t"
+ "fld.p %0, (17*8), fp34\n\t"
+ "fld.p %0, (18*8), fp36\n\t"
+ "fld.p %0, (19*8), fp38\n\t"
+ "fld.p %0, (20*8), fp40\n\t"
+ "fld.p %0, (21*8), fp42\n\t"
+ "fld.p %0, (22*8), fp44\n\t"
+ "fld.p %0, (23*8), fp46\n\t"
+ "fld.p %0, (24*8), fp48\n\t"
+ "fld.p %0, (25*8), fp50\n\t"
+ "fld.p %0, (26*8), fp52\n\t"
+ "fld.p %0, (27*8), fp54\n\t"
+ "fld.p %0, (28*8), fp56\n\t"
+ "fld.p %0, (29*8), fp58\n\t"
+ "fld.p %0, (30*8), fp60\n\t"
+
+ "fld.s %0, (32*8), fr63\n\t"
+ "fputscr fr63\n\t"
+
+ "fld.p %0, (31*8), fp62\n\t"
+ : /* no output */
+ : "r" (fpregs) );
+}
+
+void fpinit(struct sh_fpu_hard_struct *fpregs)
+{
+ *fpregs = init_fpuregs.hard;
+}
+
+asmlinkage void
+do_fpu_error(unsigned long ex, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ regs->pc += 4;
+
+ tsk->thread.trap_no = 11;
+ tsk->thread.error_code = 0;
+ force_sig(SIGFPE, tsk);
+}
+
+
+asmlinkage void
+do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
+{
+ void die(const char *str, struct pt_regs *regs, long err);
+
+	if (!user_mode(regs))
+ die("FPU used in kernel", regs, ex);
+
+ regs->sr &= ~SR_FD;
+
+ if (last_task_used_math == current)
+ return;
+
+ enable_fpu();
+ if (last_task_used_math != NULL)
+ /* Other processes fpu state, save away */
+ save_fpu(last_task_used_math, regs);
+
+ last_task_used_math = current;
+ if (used_math()) {
+ fpload(&current->thread.fpu.hard);
+ } else {
+ /* First time FPU user. */
+ fpload(&init_fpuregs.hard);
+ set_used_math();
+ }
+ disable_fpu();
+}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
new file mode 100644
index 00000000000..15d167fd0ae
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -0,0 +1,76 @@
+/*
+ * arch/sh/kernel/cpu/sh5/probe.c
+ *
+ * CPU Subtype Probing for SH-5.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+int __init detect_cpu_and_cache_system(void)
+{
+ unsigned long long cir;
+
+ /* Do peeks in real mode to avoid having to set up a mapping for the
+ WPC registers. On SH5-101 cut2, such a mapping would be exposed to
+ an address translation erratum which would make it hard to set up
+ correctly. */
+ cir = peek_real_address_q(0x0d000008);
+ if ((cir & 0xffff) == 0x5103) {
+ boot_cpu_data.type = CPU_SH5_103;
+ } else if (((cir >> 32) & 0xffff) == 0x51e2) {
+ /* CPU.VCR aliased at CIR address on SH5-101 */
+ boot_cpu_data.type = CPU_SH5_101;
+ } else {
+ boot_cpu_data.type = CPU_SH_NONE;
+ }
+
+ /*
+ * First, setup some sane values for the I-cache.
+ */
+ boot_cpu_data.icache.ways = 4;
+ boot_cpu_data.icache.sets = 256;
+ boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
+
+#if 0
+ /*
+ * FIXME: This can probably be cleaned up a bit as well.. for example,
+ * do we really need the way shift _and_ the way_step_shift ?? Judging
+ * by the existing code, I would guess no.. is there any valid reason
+ * why we need to be tracking this around?
+ */
+ boot_cpu_data.icache.way_shift = 13;
+ boot_cpu_data.icache.entry_shift = 5;
+ boot_cpu_data.icache.set_shift = 4;
+ boot_cpu_data.icache.way_step_shift = 16;
+ boot_cpu_data.icache.asid_shift = 2;
+
+ /*
+ * way offset = cache size / associativity, so just don't factor in
+ * associativity in the first place..
+ */
+ boot_cpu_data.icache.way_ofs = boot_cpu_data.icache.sets *
+ boot_cpu_data.icache.linesz;
+
+ boot_cpu_data.icache.asid_mask = 0x3fc;
+ boot_cpu_data.icache.idx_mask = 0x1fe0;
+ boot_cpu_data.icache.epn_mask = 0xffffe000;
+#endif
+
+ boot_cpu_data.icache.flags = 0;
+
+ /* A trivial starting point.. */
+ memcpy(&boot_cpu_data.dcache,
+ &boot_cpu_data.icache, sizeof(struct cache_info));
+
+ return 0;
+}
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
new file mode 100644
index 00000000000..45c351b0f1b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/switchto.S
@@ -0,0 +1,198 @@
+/*
+ * arch/sh/kernel/cpu/sh5/switchto.S
+ *
+ * sh64 context switch
+ *
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+*/
+
+ .section .text..SHmedia32,"ax"
+ .little
+
+ .balign 32
+
+ .type sh64_switch_to,@function
+ .global sh64_switch_to
+ .global __sh64_switch_to_end
+sh64_switch_to:
+
+/* Incoming args
+ r2 - prev
+ r3 - &prev->thread
+ r4 - next
+ r5 - &next->thread
+
+ Outgoing results
+ r2 - last (=prev) : this just stays in r2 throughout
+
+ Want to create a full (struct pt_regs) on the stack to allow backtracing
+ functions to work. However, we only need to populate the callee-save
+   register slots in this structure; since we're a function, our ancestors must
+   themselves have preserved all caller-save state on the stack.  This saves
+ some wasted effort since we won't need to look at the values.
+
+ In particular, all caller-save registers are immediately available for
+ scratch use.
+
+*/
+
+#define FRAME_SIZE (76*8 + 8)
+
+ movi FRAME_SIZE, r0
+ sub.l r15, r0, r15
+ ! Do normal-style register save to support backtrace
+
+ st.l r15, 0, r18 ! save link reg
+ st.l r15, 4, r14 ! save fp
+ add.l r15, r63, r14 ! setup frame pointer
+
+ ! hopefully this looks normal to the backtrace now.
+
+ addi.l r15, 8, r1 ! base of pt_regs
+ addi.l r1, 24, r0 ! base of pt_regs.regs
+ addi.l r0, (63*8), r8 ! base of pt_regs.trregs
+
+ /* Note : to be fixed?
+ struct pt_regs is really designed for holding the state on entry
+ to an exception, i.e. pc,sr,regs etc. However, for the context
+ switch state, some of this is not required. But the unwinder takes
+ struct pt_regs * as an arg so we have to build this structure
+ to allow unwinding switched tasks in show_state() */
+
+ st.q r0, ( 9*8), r9
+ st.q r0, (10*8), r10
+ st.q r0, (11*8), r11
+ st.q r0, (12*8), r12
+ st.q r0, (13*8), r13
+ st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
+ ! the point where the process is left in suspended animation, i.e. current
+ ! fp here, not the saved one.
+ st.q r0, (16*8), r16
+
+ st.q r0, (24*8), r24
+ st.q r0, (25*8), r25
+ st.q r0, (26*8), r26
+ st.q r0, (27*8), r27
+ st.q r0, (28*8), r28
+ st.q r0, (29*8), r29
+ st.q r0, (30*8), r30
+ st.q r0, (31*8), r31
+ st.q r0, (32*8), r32
+ st.q r0, (33*8), r33
+ st.q r0, (34*8), r34
+ st.q r0, (35*8), r35
+
+ st.q r0, (44*8), r44
+ st.q r0, (45*8), r45
+ st.q r0, (46*8), r46
+ st.q r0, (47*8), r47
+ st.q r0, (48*8), r48
+ st.q r0, (49*8), r49
+ st.q r0, (50*8), r50
+ st.q r0, (51*8), r51
+ st.q r0, (52*8), r52
+ st.q r0, (53*8), r53
+ st.q r0, (54*8), r54
+ st.q r0, (55*8), r55
+ st.q r0, (56*8), r56
+ st.q r0, (57*8), r57
+ st.q r0, (58*8), r58
+ st.q r0, (59*8), r59
+
+ ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
+	! Use a local label to avoid creating a symbol that will confuse the
+ ! backtrace
+ pta .Lsave_pc, tr0
+
+ gettr tr5, r45
+ gettr tr6, r46
+ gettr tr7, r47
+ st.q r8, (5*8), r45
+ st.q r8, (6*8), r46
+ st.q r8, (7*8), r47
+
+ ! Now switch context
+ gettr tr0, r9
+ st.l r3, 0, r15 ! prev->thread.sp
+ st.l r3, 8, r1 ! prev->thread.kregs
+ st.l r3, 4, r9 ! prev->thread.pc
+ st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
+
+ ! Load PC for next task (init value or save_pc later)
+ ld.l r5, 4, r18 ! next->thread.pc
+ ! Switch stacks
+ ld.l r5, 0, r15 ! next->thread.sp
+ ptabs r18, tr0
+
+ ! Update current
+ ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
+ putcon r9, kcr0 ! current = next->thread_info
+
+ ! go to save_pc for a reschedule, or the initial thread.pc for a new process
+ blink tr0, r63
+
+ ! Restore (when we come back to a previously saved task)
+.Lsave_pc:
+ addi.l r15, 32, r0 ! r0 = next's regs
+ addi.l r0, (63*8), r8 ! r8 = next's tr_regs
+
+ ld.q r8, (5*8), r45
+ ld.q r8, (6*8), r46
+ ld.q r8, (7*8), r47
+ ptabs r45, tr5
+ ptabs r46, tr6
+ ptabs r47, tr7
+
+ ld.q r0, ( 9*8), r9
+ ld.q r0, (10*8), r10
+ ld.q r0, (11*8), r11
+ ld.q r0, (12*8), r12
+ ld.q r0, (13*8), r13
+ ld.q r0, (14*8), r14
+ ld.q r0, (16*8), r16
+
+ ld.q r0, (24*8), r24
+ ld.q r0, (25*8), r25
+ ld.q r0, (26*8), r26
+ ld.q r0, (27*8), r27
+ ld.q r0, (28*8), r28
+ ld.q r0, (29*8), r29
+ ld.q r0, (30*8), r30
+ ld.q r0, (31*8), r31
+ ld.q r0, (32*8), r32
+ ld.q r0, (33*8), r33
+ ld.q r0, (34*8), r34
+ ld.q r0, (35*8), r35
+
+ ld.q r0, (44*8), r44
+ ld.q r0, (45*8), r45
+ ld.q r0, (46*8), r46
+ ld.q r0, (47*8), r47
+ ld.q r0, (48*8), r48
+ ld.q r0, (49*8), r49
+ ld.q r0, (50*8), r50
+ ld.q r0, (51*8), r51
+ ld.q r0, (52*8), r52
+ ld.q r0, (53*8), r53
+ ld.q r0, (54*8), r54
+ ld.q r0, (55*8), r55
+ ld.q r0, (56*8), r56
+ ld.q r0, (57*8), r57
+ ld.q r0, (58*8), r58
+ ld.q r0, (59*8), r59
+
+ ! epilogue
+ ld.l r15, 0, r18
+ ld.l r15, 4, r14
+ ptabs r18, tr0
+ movi FRAME_SIZE, r0
+ add r15, r0, r15
+ blink tr0, r63
+__sh64_switch_to_end:
+.LFE1:
+ .size sh64_switch_to,.LFE1-sh64_switch_to
+
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
new file mode 100644
index 00000000000..119c20afd4e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -0,0 +1,326 @@
+/*
+ * arch/sh/kernel/cpu/sh5/unwind.c
+ *
+ * Copyright (C) 2004 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+static u8 regcache[63];
+
+/*
+ * Finding the previous stack frame isn't as straightforward as it is
+ * on some other platforms. In the sh64 case, we don't have "linked" stack
+ * frames, so we need to do a bit of work to determine the previous frame,
+ * and in turn, the previous r14/r18 pair.
+ *
+ * There are generally a few cases which determine where we can find out
+ * the r14/r18 values. In the general case, this can be determined by poking
+ * around the prologue of the symbol PC is in (note that we absolutely must
+ * have frame pointer support as well as the kernel symbol table mapped,
+ * otherwise we can't even get this far).
+ *
+ * In other cases, such as the interrupt/exception path, we can poke around
+ * the sp/fp.
+ *
+ * Notably, this entire approach is somewhat error prone, and in the event
+ * that the previous frame cannot be determined, that's all we can do.
+ * Either way, this still leaves us with a more correct backtrace than what
+ * we would be able to come up with by walking the stack (which is garbage
+ * for anything beyond the first frame).
+ * -- PFM.
+ */
+static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
+ unsigned long *pprev_fp, unsigned long *pprev_pc,
+ struct pt_regs *regs)
+{
+ const char *sym;
+ char namebuf[128];
+ unsigned long offset;
+ unsigned long prologue = 0;
+ unsigned long fp_displacement = 0;
+ unsigned long fp_prev = 0;
+ unsigned long offset_r14 = 0, offset_r18 = 0;
+ int i, found_prologue_end = 0;
+
+ sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
+ if (!sym)
+ return -EINVAL;
+
+ prologue = pc - offset;
+ if (!prologue)
+ return -EINVAL;
+
+ /* Validate fp, to avoid risk of dereferencing a bad pointer later.
+ Assume 128Mb since that's the amount of RAM on a Cayman. Modify
+ when there is an SH-5 board with more. */
+ if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
+ (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
+ ((fp & 7) != 0)) {
+ return -EINVAL;
+ }
+
+ /*
+ * Depth to walk, depth is completely arbitrary.
+ */
+ for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
+ unsigned long op;
+ u8 major, minor;
+ u8 src, dest, disp;
+
+ op = *(unsigned long *)prologue;
+
+ major = (op >> 26) & 0x3f;
+ src = (op >> 20) & 0x3f;
+ minor = (op >> 16) & 0xf;
+ disp = (op >> 10) & 0x3f;
+ dest = (op >> 4) & 0x3f;
+
+ /*
+ * Stack frame creation happens in a number of ways.. in the
+ * general case when the stack frame is less than 511 bytes,
+ * it's generally created by an addi or addi.l:
+ *
+ * addi/addi.l r15, -FRAME_SIZE, r15
+ *
+ * in the event that the frame size is bigger than this, it's
+ * typically created using a movi/sub pair as follows:
+ *
+ * movi FRAME_SIZE, rX
+ * sub r15, rX, r15
+ */
+
+ switch (major) {
+ case (0x00 >> 2):
+ switch (minor) {
+ case 0x8: /* add.l */
+ case 0x9: /* add */
+ /* Look for r15, r63, r14 */
+ if (src == 15 && disp == 63 && dest == 14)
+ found_prologue_end = 1;
+
+ break;
+ case 0xa: /* sub.l */
+ case 0xb: /* sub */
+ if (src != 15 || dest != 15)
+ continue;
+
+ fp_displacement -= regcache[disp];
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+ break;
+ case (0xa8 >> 2): /* st.l */
+ if (src != 15)
+ continue;
+
+ switch (dest) {
+ case 14:
+ if (offset_r14 || fp_displacement == 0)
+ continue;
+
+ offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r14 *= sizeof(unsigned long);
+ offset_r14 += fp_displacement;
+ break;
+ case 18:
+ if (offset_r18 || fp_displacement == 0)
+ continue;
+
+ offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r18 *= sizeof(unsigned long);
+ offset_r18 += fp_displacement;
+ break;
+ }
+
+ break;
+ case (0xcc >> 2): /* movi */
+ if (dest >= 63) {
+ printk(KERN_NOTICE "%s: Invalid dest reg %d "
+ "specified in movi handler. Failed "
+ "opcode was 0x%lx: ", __FUNCTION__,
+ dest, op);
+
+ continue;
+ }
+
+ /* Sign extend */
+ regcache[dest] =
+ ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
+ break;
+ case (0xd0 >> 2): /* addi */
+ case (0xd4 >> 2): /* addi.l */
+ /* Look for r15, -FRAME_SIZE, r15 */
+ if (src != 15 || dest != 15)
+ continue;
+
+ /* Sign extended frame size.. */
+ fp_displacement +=
+ (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+
+ if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
+ break;
+ }
+
+ if (offset_r14 == 0 || fp_prev == 0) {
+ if (!offset_r14)
+ pr_debug("Unable to find r14 offset\n");
+ if (!fp_prev)
+ pr_debug("Unable to find previous fp\n");
+
+ return -EINVAL;
+ }
+
+	/* For an innermost leaf function, there might not be an offset_r18 */
+ if (!*pprev_pc && (offset_r18 == 0))
+ return -EINVAL;
+
+ *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
+
+ if (offset_r18)
+ *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
+
+ *pprev_pc &= ~1;
+
+ return 0;
+}
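+
+/*
+ * The (((s64)op >> 10) & 0x3ff) << 54 >> 54 dance in
+ * lookup_prev_stack_frame() extracts the 10-bit immediate field of an
+ * addi/st.l opcode and sign-extends it. Worked example (illustrative):
+ * for "addi r15, -56, r15" the field holds 0x3c8; shifting those 10 bits
+ * up to bits 63..54 and arithmetic-shifting back down yields (s64)-56.
+ */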
+
+/* Don't put this on the stack since we'll want to call sh64_unwind
+ * when we're close to underflowing the stack anyway. */
+static struct pt_regs here_regs;
+
+extern const char syscall_ret;
+extern const char ret_from_syscall;
+extern const char ret_from_exception;
+extern const char ret_from_irq;
+
+static void sh64_unwind_inner(struct pt_regs *regs);
+
+static void unwind_nested (unsigned long pc, unsigned long fp)
+{
+ if ((fp >= __MEMORY_START) &&
+ ((fp & 7) == 0)) {
+ sh64_unwind_inner((struct pt_regs *) fp);
+ }
+}
+
+static void sh64_unwind_inner(struct pt_regs *regs)
+{
+ unsigned long pc, fp;
+ int ofs = 0;
+ int first_pass;
+
+ pc = regs->pc & ~1;
+ fp = regs->regs[14];
+
+ first_pass = 1;
+ for (;;) {
+ int cond;
+ unsigned long next_fp, next_pc;
+
+ if (pc == ((unsigned long) &syscall_ret & ~1)) {
+ printk("SYSCALL\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
+ printk("SYSCALL (PREEMPTED)\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ /* In this case, the PC is discovered by lookup_prev_stack_frame but
+ it has 4 taken off it to look like the 'caller' */
+ if (pc == ((unsigned long) &ret_from_exception & ~1)) {
+ printk("EXCEPTION\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_irq & ~1)) {
+ printk("IRQ\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
+ ((pc & 3) == 0) && ((fp & 7) == 0));
+
+ pc -= ofs;
+
+ printk("[<%08lx>] ", pc);
+ print_symbol("%s\n", pc);
+
+ if (first_pass) {
+ /* If the innermost frame is a leaf function, it's
+ * possible that r18 is never saved out to the stack.
+ */
+ next_pc = regs->regs[18];
+ } else {
+ next_pc = 0;
+ }
+
+ if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
+ ofs = sizeof(unsigned long);
+ pc = next_pc & ~1;
+ fp = next_fp;
+ } else {
+ printk("Unable to lookup previous stack frame\n");
+ break;
+ }
+ first_pass = 0;
+ }
+
+ printk("\n");
+
+}
+
+void sh64_unwind(struct pt_regs *regs)
+{
+ if (!regs) {
+ /*
+ * Fetch current regs if we have no other saved state to back
+ * trace from.
+ */
+ regs = &here_regs;
+
+ __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
+ __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
+ __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
+
+ __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
+ __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
+ __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
+ __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
+ __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
+ __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
+ __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
+ __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
+
+ __asm__ __volatile__ (
+ "pta 0f, tr0\n\t"
+ "blink tr0, %0\n\t"
+ "0: nop"
+ : "=r" (regs->pc)
+ );
+ }
+
+ printk("\nCall Trace:\n");
+ sh64_unwind_inner(regs);
+}
+
diff --git a/arch/sh/kernel/dump_task.c b/arch/sh/kernel/dump_task.c
new file mode 100644
index 00000000000..4a8a4083ff0
--- /dev/null
+++ b/arch/sh/kernel/dump_task.c
@@ -0,0 +1,31 @@
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+
+/*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+ struct pt_regs ptregs;
+
+ ptregs = *task_pt_regs(tsk);
+ elf_core_copy_regs(regs, &ptregs);
+
+ return 1;
+}
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
+{
+ int fpvalid = 0;
+
+#if defined(CONFIG_SH_FPU)
+ fpvalid = !!tsk_used_math(tsk);
+ if (fpvalid) {
+ unlazy_fpu(tsk, task_pt_regs(tsk));
+ memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+ }
+#endif
+
+ return fpvalid;
+}
+
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 2f30977558a..957f2561154 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -63,7 +63,8 @@ static struct console bios_console = {
#include <linux/serial_core.h>
#include "../../../drivers/serial/sh-sci.h"
-#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721)
#define EPK_SCSMR_VALUE 0x000
#define EPK_SCBRR_VALUE 0x00C
#define EPK_FIFO_SIZE 64
@@ -117,7 +118,8 @@ static struct console scif_console = {
};
#if !defined(CONFIG_SH_STANDARD_BIOS)
-#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721)
static void scif_sercon_init(char *s)
{
sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */
@@ -208,10 +210,12 @@ static int __init setup_early_printk(char *buf)
if (!strncmp(buf, "serial", 6)) {
early_console = &scif_console;
-#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720)) && \
- !defined(CONFIG_SH_STANDARD_BIOS)
+#if !defined(CONFIG_SH_STANDARD_BIOS)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721)
scif_sercon_init(buf + 6);
#endif
+#endif
}
#endif
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index e0317ed080c..926b2e7b11c 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -176,25 +176,6 @@ work_notifysig:
jmp @r1
lds r0, pr
work_resched:
-#if defined(CONFIG_GUSA) && !defined(CONFIG_PREEMPT)
- ! gUSA handling
- mov.l @(OFF_SP,r15), r0 ! get user space stack pointer
- mov r0, r1
- shll r0
- bf/s 1f
- shll r0
- bf/s 1f
- mov #OFF_PC, r0
- ! SP >= 0xc0000000 : gUSA mark
- mov.l @(r0,r15), r2 ! get user space PC (program counter)
- mov.l @(OFF_R0,r15), r3 ! end point
- cmp/hs r3, r2 ! r2 >= r3?
- bt 1f
- add r3, r1 ! rewind point #2
- mov.l r1, @(r0,r15) ! reset PC to rewind point #2
- !
-1:
-#endif
mov.l 1f, r1
jsr @r1 ! schedule
nop
@@ -224,7 +205,7 @@ work_resched:
syscall_exit_work:
! r0: current_thread_info->flags
! r8: current_thread_info
- tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP, r0
+ tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT, r0
bt/s work_pending
tst #_TIF_NEED_RESCHED, r0
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -234,6 +215,8 @@ syscall_exit_work:
#endif
sti
! XXX setup arguments...
+ mov r15, r4
+ mov #1, r5
mov.l 4f, r0 ! do_syscall_trace
jsr @r0
nop
@@ -244,6 +227,8 @@ syscall_exit_work:
syscall_trace_entry:
! Yes it is traced.
! XXX setup arguments...
+ mov r15, r4
+ mov #0, r5
mov.l 4f, r11 ! Call do_syscall_trace which notifies
jsr @r11 ! superior (will chomp R[0-7])
nop
@@ -366,7 +351,7 @@ ENTRY(system_call)
!
get_current_thread_info r8, r10
mov.l @(TI_FLAGS,r8), r8
- mov #_TIF_SYSCALL_TRACE, r10
+ mov #(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT), r10
tst r10, r8
bf syscall_trace_entry
!
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head_32.S
index 3338239717f..d67d7ed09f2 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head_32.S
@@ -32,7 +32,11 @@ ENTRY(empty_zero_page)
.long 1 /* LOADER_TYPE */
.long 0x00360000 /* INITRD_START */
.long 0x000a0000 /* INITRD_SIZE */
- .long 0
+#ifdef CONFIG_32BIT
+ .long 0x53453f00 + 32 /* "SE?" = 32 bit */
+#else
+ .long 0x53453f00 + 29 /* "SE?" = 29 bit */
+#endif
1:
.skip PAGE_SIZE - empty_zero_page - 1b
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
new file mode 100644
index 00000000000..f42d4c0feb7
--- /dev/null
+++ b/arch/sh/kernel/head_64.S
@@ -0,0 +1,356 @@
+/*
+ * arch/sh/kernel/head_64.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/tlb.h>
+#include <asm/cpu/registers.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/thread_info.h>
+
+/*
+ * MMU defines: TLB boundaries.
+ */
+
+#define MMUIR_FIRST ITLB_FIXED
+#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+#define MMUIR_STEP TLB_STEP
+
+#define MMUDR_FIRST DTLB_FIXED
+#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+#define MMUDR_STEP TLB_STEP
+
+/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
+#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
+#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
+#endif
+
+/*
+ * MMU defines: Fixed TLBs.
+ */
+/* Deal safely with the case where the base of RAM is not 512Mb aligned */
+
+#define ALIGN_512M_MASK (0xffffffffe0000000)
+#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
+#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
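+
+/* Worked example (hypothetical values): with CONFIG_PAGE_OFFSET
+ * 0x80000000 and CONFIG_MEMORY_START 0x08000000, the kernel runs at
+ * effective 0x88000000, so ALIGNED_EFFECTIVE = 0x80000000 and
+ * ALIGNED_PHYSICAL = 0x00000000 -- the single 512Mb translation defined
+ * below still covers the kernel even though RAM is not 512Mb aligned.
+ */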
+
+#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
+ /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+
+#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
+ /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
+
+#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
+ /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
+ /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
+
+#ifdef CONFIG_CACHE_OFF
+#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
+#else
+#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
+#endif
+#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
+
+#if defined (CONFIG_CACHE_OFF)
+#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
+#elif defined (CONFIG_CACHE_WRITETHROUGH)
+#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
+ /* WT, invalidate */
+#elif defined (CONFIG_CACHE_WRITEBACK)
+#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
+ /* WB, invalidate */
+#else
+#error preprocessor flag CONFIG_CACHE_... not recognized!
+#endif
+
+#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
+
+ .section .empty_zero_page, "aw"
+ .global empty_zero_page
+
+empty_zero_page:
+ .long 1 /* MOUNT_ROOT_RDONLY */
+ .long 0 /* RAMDISK_FLAGS */
+ .long 0x0200 /* ORIG_ROOT_DEV */
+ .long 1 /* LOADER_TYPE */
+ .long 0x00800000 /* INITRD_START */
+ .long 0x00800000 /* INITRD_SIZE */
+ .long 0
+
+ .text
+ .balign 4096,0,4096
+
+ .section .data, "aw"
+ .balign PAGE_SIZE
+
+ .section .data, "aw"
+ .balign PAGE_SIZE
+
+ .global mmu_pdtp_cache
+mmu_pdtp_cache:
+ .space PAGE_SIZE, 0
+
+ .global empty_bad_page
+empty_bad_page:
+ .space PAGE_SIZE, 0
+
+ .global empty_bad_pte_table
+empty_bad_pte_table:
+ .space PAGE_SIZE, 0
+
+ .global fpu_in_use
+fpu_in_use: .quad 0
+
+
+ .section .text.head, "ax"
+ .balign L1_CACHE_BYTES
+/*
+ * Condition at the entry of __stext:
+ * . Reset state:
+ * . SR.FD = 1 (FPU disabled)
+ * . SR.BL = 1 (Exceptions disabled)
+ * . SR.MD = 1 (Privileged Mode)
+ * . SR.MMU = 0 (MMU Disabled)
+ * . SR.CD = 0 (CTC User Visible)
+ * . SR.IMASK = Undefined (Interrupt Mask)
+ *
+ * Operations supposed to be performed by __stext:
+ * . prevent speculative fetch onto device memory while MMU is off
+ * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
+ * . first, save CPU state and set it to something harmless
+ * . any CPU detection and/or endianness settings (?)
+ * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
+ * . set initial TLB entries for cached and uncached regions
+ * (no fine granularity paging)
+ * . set initial cache state
+ * . enable MMU and caches
+ * . set CPU to a consistent state
+ * . registers (including stack pointer and current/KCR0)
+ * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
+ *	     at this stage. This is all left to later Linux initialization steps.
+ * . initialize FPU
+ * . clear BSS
+ * . jump into start_kernel()
+ *	. be prepared in case start_kernel() hopelessly returns.
+ *
+ */
+ .global _stext
+_stext:
+ /*
+ * Prevent speculative fetch on device memory due to
+ * uninitialized target registers.
+ */
+ ptabs/u ZERO, tr0
+ ptabs/u ZERO, tr1
+ ptabs/u ZERO, tr2
+ ptabs/u ZERO, tr3
+ ptabs/u ZERO, tr4
+ ptabs/u ZERO, tr5
+ ptabs/u ZERO, tr6
+ ptabs/u ZERO, tr7
+ synci
+
+ /*
+ * Read/Set CPU state. After this block:
+ * r29 = Initial SR
+ */
+ getcon SR, r29
+ movi SR_HARMLESS, r20
+ putcon r20, SR
+
+ /*
+ * Initialize EMI/LMI. To Be Done.
+ */
+
+ /*
+ * CPU detection and/or endianness settings (?). To Be Done.
+ * Pure PIC code here, please ! Just save state into r30.
+ * After this block:
+ * r30 = CPU type/Platform Endianness
+ */
+
+ /*
+ * Set initial TLB entries for cached and uncached regions.
+ * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
+ */
+ /* Clear ITLBs */
+ pta clear_ITLB, tr1
+ movi MMUIR_FIRST, r21
+ movi MMUIR_END, r22
+clear_ITLB:
+ putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
+ addi r21, MMUIR_STEP, r21
+ bne r21, r22, tr1
+
+ /* Clear DTLBs */
+ pta clear_DTLB, tr1
+ movi MMUDR_FIRST, r21
+ movi MMUDR_END, r22
+clear_DTLB:
+ putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
+ addi r21, MMUDR_STEP, r21
+ bne r21, r22, tr1
+
+ /* Map one big (512Mb) page for ITLB */
+ movi MMUIR_FIRST, r21
+ movi MMUIR_TEXT_L, r22 /* PTEL first */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
+ movi MMUIR_TEXT_H, r22 /* PTEH last */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
+
+ /* Map one big CACHED (512Mb) page for DTLB */
+ movi MMUDR_FIRST, r21
+ movi MMUDR_CACHED_L, r22 /* PTEL first */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
+ movi MMUDR_CACHED_H, r22 /* PTEH last */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
+
+#ifdef CONFIG_EARLY_PRINTK
+ /*
+ * Setup a DTLB translation for SCIF phys.
+ */
+ addi r21, MMUDR_STEP, r21
+ movi 0x0a03, r22 /* SCIF phys */
+ shori 0x0148, r22
+ putcfg r21, 1, r22 /* PTEL first */
+ movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
+ shori 0x0003, r22
+ putcfg r21, 0, r22 /* PTEH last */
+#endif
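+
+	/*
+	 * The movi/shori pairs above compose 32-bit values 16 bits at a
+	 * time: shori shifts the register left by 16 and ORs in its
+	 * immediate, so "movi 0x0a03, r22; shori 0x0148, r22" leaves
+	 * r22 = 0x0a030148 (here the SCIF PTEL: physical page number
+	 * plus protection/size bits in the low half).
+	 */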
+
+ /*
+ * Set cache behaviours.
+ */
+ /* ICache */
+ movi ICCR_BASE, r21
+ movi ICCR0_INIT_VAL, r22
+ movi ICCR1_INIT_VAL, r23
+ putcfg r21, ICCR_REG0, r22
+ putcfg r21, ICCR_REG1, r23
+
+ /* OCache */
+ movi OCCR_BASE, r21
+ movi OCCR0_INIT_VAL, r22
+ movi OCCR1_INIT_VAL, r23
+ putcfg r21, OCCR_REG0, r22
+ putcfg r21, OCCR_REG1, r23
+
+
+ /*
+ * Enable Caches and MMU. Do the first non-PIC jump.
+ * Now head.S global variables, constants and externs
+ * can be used.
+ */
+ getcon SR, r21
+ movi SR_ENABLE_MMU, r22
+ or r21, r22, r21
+ putcon r21, SSR
+ movi hyperspace, r22
+ ori r22, 1, r22 /* Make it SHmedia, not required but..*/
+ putcon r22, SPC
+ synco
+ rte /* And now go into the hyperspace ... */
+hyperspace: /* ... that's the next instruction ! */
+
+ /*
+ * Set CPU to a consistent state.
+ * r31 = FPU support flag
+ * tr0/tr7 in use. Others give a chance to loop somewhere safe
+ */
+ movi start_kernel, r32
+ ori r32, 1, r32
+
+ ptabs r32, tr0 /* r32 = _start_kernel address */
+ pta/u hopeless, tr1
+ pta/u hopeless, tr2
+ pta/u hopeless, tr3
+ pta/u hopeless, tr4
+ pta/u hopeless, tr5
+ pta/u hopeless, tr6
+ pta/u hopeless, tr7
+ gettr tr1, r28 /* r28 = hopeless address */
+
+ /* Set initial stack pointer */
+ movi init_thread_union, SP
+ putcon SP, KCR0 /* Set current to init_task */
+ movi THREAD_SIZE, r22 /* Point to the end */
+ add SP, r22, SP
+
+ /*
+ * Initialize FPU.
+ * Keep FPU flag in r31. After this block:
+ * r31 = FPU flag
+ */
+ movi fpu_in_use, r31 /* Temporary */
+
+#ifdef CONFIG_SH_FPU
+ getcon SR, r21
+ movi SR_ENABLE_FPU, r22
+ and r21, r22, r22
+ putcon r22, SR /* Try to enable */
+ getcon SR, r22
+ xor r21, r22, r21
+ shlri r21, 15, r21 /* Supposedly 0/1 */
+ st.q r31, 0 , r21 /* Set fpu_in_use */
+#else
+ movi 0, r21
+ st.q r31, 0 , r21 /* Set fpu_in_use */
+#endif
+ or r21, ZERO, r31 /* Set FPU flag at last */
+
+#ifndef CONFIG_SH_NO_BSS_INIT
+/* Don't clear BSS if running on slow platforms such as an RTL simulation,
+ remote memory via SHdebug link, etc. For these the memory can be guaranteed
+ to be all zero on boot anyway. */
+ /*
+ * Clear bss
+ */
+ pta clear_quad, tr1
+ movi __bss_start, r22
+ movi _end, r23
+clear_quad:
+ st.q r22, 0, ZERO
+ addi r22, 8, r22
+ bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
+#endif
+ pta/u hopeless, tr1
+
+ /* Say bye to head.S but be prepared to wrongly get back ... */
+ blink tr0, LINK
+
+ /* If we ever get back here through LINK/tr1-tr7 */
+ pta/u hopeless, tr7
+
+hopeless:
+ /*
+ * Something's badly wrong here. Loop endlessly,
+ * there's nothing more we can do about it.
+ *
+	 * Note on hopeless: it can be jumped into either
+	 * before or after jumping into hyperspace. The only
+	 * requirement is that it be reached PIC (via PTA)
+	 * before, and either way (PTA/PTABS) after. Given the
+	 * virtual-to-physical mapping, a simulator/emulator can
+	 * easily tell which side we came from just by looking
+	 * at the hopeless (PC) address.
+ *
+ * For debugging purposes:
+ * (r28) hopeless/loop address
+ * (r29) Original SR
+ * (r30) CPU type/Platform endianness
+ * (r31) FPU Support
+ * (r32) _start_kernel address
+ */
+ blink tr7, ZERO
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
index 4b449c4a6ba..f9bcc606127 100644
--- a/arch/sh/kernel/init_task.c
+++ b/arch/sh/kernel/init_task.c
@@ -11,8 +11,8 @@ static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct pt_regs fake_swapper_regs;
struct mm_struct init_mm = INIT_MM(init_mm);
-
EXPORT_SYMBOL(init_mm);
/*
@@ -22,7 +22,7 @@ EXPORT_SYMBOL(init_mm);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
+union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 501fe03e371..71c9fde2fd9 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -61,73 +61,6 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
}
EXPORT_SYMBOL(memset_io);
-void __raw_readsl(unsigned long addr, void *datap, int len)
-{
- u32 *data;
-
- for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
- *data++ = ctrl_inl(addr);
-
- if (likely(len >= (0x20 >> 2))) {
- int tmp2, tmp3, tmp4, tmp5, tmp6;
-
- __asm__ __volatile__(
- "1: \n\t"
- "mov.l @%7, r0 \n\t"
- "mov.l @%7, %2 \n\t"
-#ifdef CONFIG_CPU_SH4
- "movca.l r0, @%0 \n\t"
-#else
- "mov.l r0, @%0 \n\t"
-#endif
- "mov.l @%7, %3 \n\t"
- "mov.l @%7, %4 \n\t"
- "mov.l @%7, %5 \n\t"
- "mov.l @%7, %6 \n\t"
- "mov.l @%7, r7 \n\t"
- "mov.l @%7, r0 \n\t"
- "mov.l %2, @(0x04,%0) \n\t"
- "mov #0x20>>2, %2 \n\t"
- "mov.l %3, @(0x08,%0) \n\t"
- "sub %2, %1 \n\t"
- "mov.l %4, @(0x0c,%0) \n\t"
- "cmp/hi %1, %2 ! T if 32 > len \n\t"
- "mov.l %5, @(0x10,%0) \n\t"
- "mov.l %6, @(0x14,%0) \n\t"
- "mov.l r7, @(0x18,%0) \n\t"
- "mov.l r0, @(0x1c,%0) \n\t"
- "bf.s 1b \n\t"
- " add #0x20, %0 \n\t"
- : "=&r" (data), "=&r" (len),
- "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
- "=&r" (tmp5), "=&r" (tmp6)
- : "r"(addr), "0" (data), "1" (len)
- : "r0", "r7", "t", "memory");
- }
-
- for (; len != 0; len--)
- *data++ = ctrl_inl(addr);
-}
-EXPORT_SYMBOL(__raw_readsl);
-
-void __raw_writesl(unsigned long addr, const void *data, int len)
-{
- if (likely(len != 0)) {
- int tmp1;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "mov.l @%0+, %1 \n\t"
- "dt %3 \n\t"
- "bf.s 1b \n\t"
- " mov.l %1, @%4 \n\t"
- : "=&r" (data), "=&r" (tmp1)
- : "0" (data), "r" (len), "r"(addr)
- : "t", "memory");
- }
-}
-EXPORT_SYMBOL(__raw_writesl);
-
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return sh_mv.mv_ioport_map(port, nr);
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index 142a4e5b7eb..b3d0a03b4c7 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -1,5 +1,15 @@
/* Kernel module help for SH.
+ SHcompact version by Kaz Kojima and Paul Mundt.
+
+ SHmedia bits:
+
+ Copyright 2004 SuperH (UK) Ltd
+ Author: Richard Curnow
+
+ Based on the sh version, and on code from the sh64-specific parts of
+ modutils, originally written by Richard Curnow and Ben Gaster.
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -21,12 +31,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt...)
-#endif
-
void *module_alloc(unsigned long size)
{
if (size == 0)
@@ -52,6 +56,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
return 0;
}
+#ifdef CONFIG_SUPERH32
#define COPY_UNALIGNED_WORD(sw, tw, align) \
{ \
void *__s = &(sw), *__t = &(tw); \
@@ -74,6 +79,10 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
break; \
} \
}
+#else
+/* One thing SHmedia doesn't screw up! */
+#define COPY_UNALIGNED_WORD(sw, tw, align) { (tw) = (sw); }
+#endif
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
@@ -89,8 +98,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
uint32_t value;
int align;
- DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -102,17 +111,44 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
relocation = sym->st_value + rel[i].r_addend;
align = (int)location & 3;
+#ifdef CONFIG_SUPERH64
+ /* For text addresses, bit2 of the st_other field indicates
+ * whether the symbol is SHmedia (1) or SHcompact (0). If
+ * SHmedia, the LSB of the symbol needs to be asserted
+ * for the CPU to be in SHmedia mode when it starts executing
+ * the branch target. */
+		relocation |= !!(sym->st_other & 4);
+#endif
+
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_DIR32:
- COPY_UNALIGNED_WORD (*location, value, align);
+ COPY_UNALIGNED_WORD (*location, value, align);
value += relocation;
- COPY_UNALIGNED_WORD (value, *location, align);
+ COPY_UNALIGNED_WORD (value, *location, align);
break;
case R_SH_REL32:
- relocation = (relocation - (Elf32_Addr) location);
- COPY_UNALIGNED_WORD (*location, value, align);
+ relocation = (relocation - (Elf32_Addr) location);
+ COPY_UNALIGNED_WORD (*location, value, align);
value += relocation;
- COPY_UNALIGNED_WORD (value, *location, align);
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ case R_SH_IMM_LOW16:
+ *location = (*location & ~0x3fffc00) |
+ ((relocation & 0xffff) << 10);
+ break;
+ case R_SH_IMM_MEDLOW16:
+ *location = (*location & ~0x3fffc00) |
+ (((relocation >> 16) & 0xffff) << 10);
+ break;
+ case R_SH_IMM_LOW16_PCREL:
+ relocation -= (Elf32_Addr) location;
+ *location = (*location & ~0x3fffc00) |
+ ((relocation & 0xffff) << 10);
+ break;
+ case R_SH_IMM_MEDLOW16_PCREL:
+ relocation -= (Elf32_Addr) location;
+ *location = (*location & ~0x3fffc00) |
+ (((relocation >> 16) & 0xffff) << 10);
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
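
The four new R_SH_IMM_* cases all patch the 16-bit immediate field of an SHmedia instruction (MOVI/SHORI-style encodings), which occupies bits 10..25 of the 32-bit opcode; that is what the 0x3fffc00 mask selects. The manipulation, pulled out into a helper for clarity (a sketch; the kernel keeps it inline as shown above):

	/* Insert a 16-bit immediate into bits 10..25 of an SHmedia opcode. */
	static unsigned int patch_imm16(unsigned int insn, unsigned int imm16)
	{
		insn &= ~0x3fffc00u;			/* clear the immediate field */
		insn |= (imm16 & 0xffffu) << 10;	/* place the new value */
		return insn;
	}

For the MEDLOW variants the caller passes (relocation >> 16) & 0xffff, i.e. the next-higher 16 bits of the address; the PCREL variants subtract the location first.
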
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process_32.c
index 6d7f2b07e49..9ab1926b9d1 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process_32.c
@@ -230,34 +230,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
return fpvalid;
}
-/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
- struct pt_regs ptregs;
-
- ptregs = *task_pt_regs(tsk);
- elf_core_copy_regs(regs, &ptregs);
-
- return 1;
-}
-
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
-{
- int fpvalid = 0;
-
-#if defined(CONFIG_SH_FPU)
- fpvalid = !!tsk_used_math(tsk);
- if (fpvalid) {
- unlazy_fpu(tsk, task_pt_regs(tsk));
- memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
- }
-#endif
-
- return fpvalid;
-}
-
asmlinkage void ret_from_fork(void);
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
@@ -350,25 +322,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
unlazy_fpu(prev, task_pt_regs(prev));
#endif
-#if defined(CONFIG_GUSA) && defined(CONFIG_PREEMPT)
- {
- struct pt_regs *regs;
-
- preempt_disable();
- regs = task_pt_regs(prev);
- if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
- int offset = (int)regs->regs[15];
-
- /* Reset stack pointer: clear critical region mark */
- regs->regs[15] = regs->regs[1];
- if (regs->pc < regs->regs[0])
- /* Go to rewind point */
- regs->pc = regs->regs[0] + offset;
- }
- preempt_enable_no_resched();
- }
-#endif
-
#ifdef CONFIG_MMU
/*
* Restore the kernel mode register
@@ -510,49 +463,3 @@ asmlinkage void break_point_trap(void)
force_sig(SIGTRAP, current);
}
-
-/*
- * Generic trap handler.
- */
-asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-
- /* Rewind */
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
-
- if (notify_die(DIE_TRAP, "debug trap", regs, 0, regs->tra & 0xff,
- SIGTRAP) == NOTIFY_STOP)
- return;
-
- force_sig(SIGTRAP, current);
-}
-
-/*
- * Special handler for BUG() traps.
- */
-asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-
- /* Rewind */
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
-
- if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
- SIGTRAP) == NOTIFY_STOP)
- return;
-
-#ifdef CONFIG_BUG
- if (__kernel_text_address(instruction_pointer(regs))) {
- u16 insn = *(u16 *)instruction_pointer(regs);
- if (insn == TRAPA_BUG_OPCODE)
- handle_BUG(regs);
- }
-#endif
-
- force_sig(SIGTRAP, current);
-}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
new file mode 100644
index 00000000000..cff3b7dc9c5
--- /dev/null
+++ b/arch/sh/kernel/process_64.c
@@ -0,0 +1,701 @@
+/*
+ * arch/sh/kernel/process_64.c
+ *
+ * This file handles the architecture-dependent parts of process handling..
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * Started from SH3/4 version:
+ * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ *
+ * In turn started from i386 version:
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/ptrace.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+
+struct task_struct *last_task_used_math = NULL;
+
+static int hlt_counter = 1;
+
+#define HARD_IDLE_TIMEOUT (HZ / 3)
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+static int __init nohlt_setup(char *__unused)
+{
+ hlt_counter = 1;
+ return 1;
+}
+
+static int __init hlt_setup(char *__unused)
+{
+ hlt_counter = 0;
+ return 1;
+}
+
+__setup("nohlt", nohlt_setup);
+__setup("hlt", hlt_setup);
+
+static inline void hlt(void)
+{
+ __asm__ __volatile__ ("sleep" : : : "memory");
+}
+
+/*
+ * The idle loop on a uniprocessor SH..
+ */
+void cpu_idle(void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ if (hlt_counter) {
+ while (!need_resched())
+ cpu_relax();
+ } else {
+ local_irq_disable();
+ while (!need_resched()) {
+ local_irq_enable();
+ hlt();
+ local_irq_disable();
+ }
+ local_irq_enable();
+ }
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+
+}
+
+void machine_restart(char * __unused)
+{
+ extern void phys_stext(void);
+
+ phys_stext();
+}
+
+void machine_halt(void)
+{
+ for (;;);
+}
+
+void machine_power_off(void)
+{
+#if 0
+ /* Disable watchdog timer */
+ ctrl_outl(0xa5000000, WTCSR);
+ /* Configure deep standby on sleep */
+ ctrl_outl(0x03, STBCR);
+#endif
+
+ __asm__ __volatile__ (
+ "sleep\n\t"
+ "synci\n\t"
+ "nop;nop;nop;nop\n\t"
+ );
+
+ panic("Unexpected wakeup!\n");
+}
+
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
+void show_regs(struct pt_regs * regs)
+{
+ unsigned long long ah, al, bh, bl, ch, cl;
+
+ printk("\n");
+
+ ah = (regs->pc) >> 32;
+ al = (regs->pc) & 0xffffffff;
+ bh = (regs->regs[18]) >> 32;
+ bl = (regs->regs[18]) & 0xffffffff;
+ ch = (regs->regs[15]) >> 32;
+ cl = (regs->regs[15]) & 0xffffffff;
+ printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->sr) >> 32;
+ al = (regs->sr) & 0xffffffff;
+ asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
+ asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
+ bh = (bh) >> 32;
+ bl = (bl) & 0xffffffff;
+ asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
+ asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
+ ch = (ch) >> 32;
+ cl = (cl) & 0xffffffff;
+ printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[0]) >> 32;
+ al = (regs->regs[0]) & 0xffffffff;
+ bh = (regs->regs[1]) >> 32;
+ bl = (regs->regs[1]) & 0xffffffff;
+ ch = (regs->regs[2]) >> 32;
+ cl = (regs->regs[2]) & 0xffffffff;
+ printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[3]) >> 32;
+ al = (regs->regs[3]) & 0xffffffff;
+ bh = (regs->regs[4]) >> 32;
+ bl = (regs->regs[4]) & 0xffffffff;
+ ch = (regs->regs[5]) >> 32;
+ cl = (regs->regs[5]) & 0xffffffff;
+ printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[6]) >> 32;
+ al = (regs->regs[6]) & 0xffffffff;
+ bh = (regs->regs[7]) >> 32;
+ bl = (regs->regs[7]) & 0xffffffff;
+ ch = (regs->regs[8]) >> 32;
+ cl = (regs->regs[8]) & 0xffffffff;
+ printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[9]) >> 32;
+ al = (regs->regs[9]) & 0xffffffff;
+ bh = (regs->regs[10]) >> 32;
+ bl = (regs->regs[10]) & 0xffffffff;
+ ch = (regs->regs[11]) >> 32;
+ cl = (regs->regs[11]) & 0xffffffff;
+ printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[12]) >> 32;
+ al = (regs->regs[12]) & 0xffffffff;
+ bh = (regs->regs[13]) >> 32;
+ bl = (regs->regs[13]) & 0xffffffff;
+ ch = (regs->regs[14]) >> 32;
+ cl = (regs->regs[14]) & 0xffffffff;
+ printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[16]) >> 32;
+ al = (regs->regs[16]) & 0xffffffff;
+ bh = (regs->regs[17]) >> 32;
+ bl = (regs->regs[17]) & 0xffffffff;
+ ch = (regs->regs[19]) >> 32;
+ cl = (regs->regs[19]) & 0xffffffff;
+ printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[20]) >> 32;
+ al = (regs->regs[20]) & 0xffffffff;
+ bh = (regs->regs[21]) >> 32;
+ bl = (regs->regs[21]) & 0xffffffff;
+ ch = (regs->regs[22]) >> 32;
+ cl = (regs->regs[22]) & 0xffffffff;
+ printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[23]) >> 32;
+ al = (regs->regs[23]) & 0xffffffff;
+ bh = (regs->regs[24]) >> 32;
+ bl = (regs->regs[24]) & 0xffffffff;
+ ch = (regs->regs[25]) >> 32;
+ cl = (regs->regs[25]) & 0xffffffff;
+ printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[26]) >> 32;
+ al = (regs->regs[26]) & 0xffffffff;
+ bh = (regs->regs[27]) >> 32;
+ bl = (regs->regs[27]) & 0xffffffff;
+ ch = (regs->regs[28]) >> 32;
+ cl = (regs->regs[28]) & 0xffffffff;
+ printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[29]) >> 32;
+ al = (regs->regs[29]) & 0xffffffff;
+ bh = (regs->regs[30]) >> 32;
+ bl = (regs->regs[30]) & 0xffffffff;
+ ch = (regs->regs[31]) >> 32;
+ cl = (regs->regs[31]) & 0xffffffff;
+ printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[32]) >> 32;
+ al = (regs->regs[32]) & 0xffffffff;
+ bh = (regs->regs[33]) >> 32;
+ bl = (regs->regs[33]) & 0xffffffff;
+ ch = (regs->regs[34]) >> 32;
+ cl = (regs->regs[34]) & 0xffffffff;
+ printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[35]) >> 32;
+ al = (regs->regs[35]) & 0xffffffff;
+ bh = (regs->regs[36]) >> 32;
+ bl = (regs->regs[36]) & 0xffffffff;
+ ch = (regs->regs[37]) >> 32;
+ cl = (regs->regs[37]) & 0xffffffff;
+ printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[38]) >> 32;
+ al = (regs->regs[38]) & 0xffffffff;
+ bh = (regs->regs[39]) >> 32;
+ bl = (regs->regs[39]) & 0xffffffff;
+ ch = (regs->regs[40]) >> 32;
+ cl = (regs->regs[40]) & 0xffffffff;
+ printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[41]) >> 32;
+ al = (regs->regs[41]) & 0xffffffff;
+ bh = (regs->regs[42]) >> 32;
+ bl = (regs->regs[42]) & 0xffffffff;
+ ch = (regs->regs[43]) >> 32;
+ cl = (regs->regs[43]) & 0xffffffff;
+ printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[44]) >> 32;
+ al = (regs->regs[44]) & 0xffffffff;
+ bh = (regs->regs[45]) >> 32;
+ bl = (regs->regs[45]) & 0xffffffff;
+ ch = (regs->regs[46]) >> 32;
+ cl = (regs->regs[46]) & 0xffffffff;
+ printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[47]) >> 32;
+ al = (regs->regs[47]) & 0xffffffff;
+ bh = (regs->regs[48]) >> 32;
+ bl = (regs->regs[48]) & 0xffffffff;
+ ch = (regs->regs[49]) >> 32;
+ cl = (regs->regs[49]) & 0xffffffff;
+ printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[50]) >> 32;
+ al = (regs->regs[50]) & 0xffffffff;
+ bh = (regs->regs[51]) >> 32;
+ bl = (regs->regs[51]) & 0xffffffff;
+ ch = (regs->regs[52]) >> 32;
+ cl = (regs->regs[52]) & 0xffffffff;
+ printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[53]) >> 32;
+ al = (regs->regs[53]) & 0xffffffff;
+ bh = (regs->regs[54]) >> 32;
+ bl = (regs->regs[54]) & 0xffffffff;
+ ch = (regs->regs[55]) >> 32;
+ cl = (regs->regs[55]) & 0xffffffff;
+ printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[56]) >> 32;
+ al = (regs->regs[56]) & 0xffffffff;
+ bh = (regs->regs[57]) >> 32;
+ bl = (regs->regs[57]) & 0xffffffff;
+ ch = (regs->regs[58]) >> 32;
+ cl = (regs->regs[58]) & 0xffffffff;
+ printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[59]) >> 32;
+ al = (regs->regs[59]) & 0xffffffff;
+ bh = (regs->regs[60]) >> 32;
+ bl = (regs->regs[60]) & 0xffffffff;
+ ch = (regs->regs[61]) >> 32;
+ cl = (regs->regs[61]) & 0xffffffff;
+ printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[62]) >> 32;
+ al = (regs->regs[62]) & 0xffffffff;
+ bh = (regs->tregs[0]) >> 32;
+ bl = (regs->tregs[0]) & 0xffffffff;
+ ch = (regs->tregs[1]) >> 32;
+ cl = (regs->tregs[1]) & 0xffffffff;
+ printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->tregs[2]) >> 32;
+ al = (regs->tregs[2]) & 0xffffffff;
+ bh = (regs->tregs[3]) >> 32;
+ bl = (regs->tregs[3]) & 0xffffffff;
+ ch = (regs->tregs[4]) >> 32;
+ cl = (regs->tregs[4]) & 0xffffffff;
+ printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->tregs[5]) >> 32;
+ al = (regs->tregs[5]) & 0xffffffff;
+ bh = (regs->tregs[6]) >> 32;
+ bl = (regs->tregs[6]) & 0xffffffff;
+ ch = (regs->tregs[7]) >> 32;
+ cl = (regs->tregs[7]) & 0xffffffff;
+ printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ /*
+ * If we're in kernel mode, dump the stack too..
+ */
+ if (!user_mode(regs)) {
+ void show_stack(struct task_struct *tsk, unsigned long *sp);
+ unsigned long sp = regs->regs[15] & 0xffffffff;
+ struct task_struct *tsk = get_current();
+
+ tsk->thread.kregs = regs;
+
+ show_stack(tsk, (unsigned long *)sp);
+ }
+}
+
+struct task_struct * alloc_task_struct(void)
+{
+ /* Get task descriptor pages */
+ return (struct task_struct *)
+ __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
+}
+
+void free_task_struct(struct task_struct *p)
+{
+ free_pages((unsigned long) p, get_order(THREAD_SIZE));
+}
+
+/*
+ * Create a kernel thread
+ */
+ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
+{
+ do_exit(fn(arg));
+}
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be freed until both the parent and the child have exited.
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+ regs.regs[2] = (unsigned long)arg;
+ regs.regs[3] = (unsigned long)fn;
+
+ regs.pc = (unsigned long)kernel_thread_helper;
+ regs.sr = (1 << 30);
+
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+ &regs, 0, NULL, NULL);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ /*
+ * See arch/sparc/kernel/process.c for the precedent for doing
+ * this -- RPC.
+ *
+ * The SH-5 FPU save/restore approach relies on
+ * last_task_used_math pointing to a live task_struct. When
+ * another task tries to use the FPU for the 1st time, the FPUDIS
+ * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
+ * existing FPU state to the FP regs field within
+ * last_task_used_math before re-loading the new task's FPU state
+ * (or initialising it if the FPU has been used before). So if
+ * last_task_used_math is stale, and its page has already been
+ * re-allocated for another use, the consequences are rather
+ * grim. Unless we null it here, there is no other path through
+ * which it would get safely nulled.
+ */
+#ifdef CONFIG_SH_FPU
+ if (last_task_used_math == current) {
+ last_task_used_math = NULL;
+ }
+#endif
+}
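
The comment above describes a lazy-FPU ownership protocol: one global pointer names the task whose state is live in the FPU, state is only spilled when another task faults on first FPU use, and the pointer must be cleared whenever its task can go away. A minimal model of that protocol (a sketch of the idea only; the real transitions live in arch/sh/kernel/cpu/sh5/fpu.c):

	struct task { unsigned long long fpregs[32]; };

	static struct task *fpu_owner;	/* models last_task_used_math */

	/* FPUDIS trap path: spill the old owner, then take ownership. */
	static void fpu_take_ownership(struct task *next,
				       void (*save_fpu)(struct task *))
	{
		if (fpu_owner && fpu_owner != next)
			save_fpu(fpu_owner);	/* flush live state to its task */
		fpu_owner = next;
	}

	/* exit_thread()/flush_thread() path: never leave a dangling owner. */
	static void fpu_drop_ownership(struct task *exiting)
	{
		if (fpu_owner == exiting)
			fpu_owner = NULL;
	}
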
+
+void flush_thread(void)
+{
+
+ /* Called by fs/exec.c (flush_old_exec) to remove traces of a
+ * previously running executable. */
+#ifdef CONFIG_SH_FPU
+ if (last_task_used_math == current) {
+ last_task_used_math = NULL;
+ }
+ /* Force FPU state to be reinitialised after exec */
+ clear_used_math();
+#endif
+
+	/* If we are a kernel thread about to become a user thread,
+	 * update kregs to point at the real register frame.
+	 */
+	if (current->thread.kregs == &fake_swapper_regs) {
+ current->thread.kregs =
+ ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
+ current->thread.uregs = current->thread.kregs;
+ }
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ /* do nothing */
+}
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+#ifdef CONFIG_SH_FPU
+ int fpvalid;
+ struct task_struct *tsk = current;
+
+ fpvalid = !!tsk_used_math(tsk);
+ if (fpvalid) {
+ if (current == last_task_used_math) {
+ enable_fpu();
+ save_fpu(tsk, regs);
+ disable_fpu();
+ last_task_used_math = 0;
+ regs->sr |= SR_FD;
+ }
+
+ memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+ }
+
+ return fpvalid;
+#else
+ return 0; /* Task didn't use the fpu at all. */
+#endif
+}
+
+asmlinkage void ret_from_fork(void);
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+ unsigned long long se; /* Sign extension */
+
+#ifdef CONFIG_SH_FPU
+	if (last_task_used_math == current) {
+ enable_fpu();
+ save_fpu(current, regs);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+#endif
+ /* Copy from sh version */
+ childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
+
+ *childregs = *regs;
+
+ if (user_mode(regs)) {
+ childregs->regs[15] = usp;
+ p->thread.uregs = childregs;
+ } else {
+ childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ }
+
+ childregs->regs[9] = 0; /* Set return value for child */
+ childregs->sr |= SR_FD; /* Invalidate FPU flag */
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.pc = (unsigned long) ret_from_fork;
+
+ /*
+ * Sign extend the edited stack.
+	 * Note that thread.pc and thread.sp will stay
+ * 32-bit wide and context switch must take care
+ * of NEFF sign extension.
+ */
+
+ se = childregs->regs[15];
+ se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
+ childregs->regs[15] = se;
+
+ return 0;
+}
+
+asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
+}
+
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ if (!newsp)
+ newsp = pregs->regs[15];
+ return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(char *ufilename, char **uargv,
+ char **uenvp, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ int error;
+ char *filename;
+
+ lock_kernel();
+ filename = getname((char __user *)ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename,
+ (char __user * __user *)uargv,
+ (char __user * __user *)uenvp,
+ pregs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+ putname(filename);
+out:
+ unlock_kernel();
+ return error;
+}
+
+/*
+ * These bracket the sleeping functions..
+ */
+extern void interruptible_sleep_on(wait_queue_head_t *q);
+
+#define mid_sched ((unsigned long) interruptible_sleep_on)
+
+static int in_sh64_switch_to(unsigned long pc)
+{
+ extern char __sh64_switch_to_end;
+ /* For a sleeping task, the PC is somewhere in the middle of the function,
+ so we don't have to worry about masking the LSB off */
+ return (pc >= (unsigned long) sh64_switch_to) &&
+ (pc < (unsigned long) &__sh64_switch_to_end);
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long schedule_fp;
+ unsigned long sh64_switch_to_fp;
+ unsigned long schedule_caller_pc;
+ unsigned long pc;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ /*
+ * The same comment as on the Alpha applies here, too ...
+ */
+ pc = thread_saved_pc(p);
+
+#ifdef CONFIG_FRAME_POINTER
+ if (in_sh64_switch_to(pc)) {
+ sh64_switch_to_fp = (long) p->thread.sp;
+ /* r14 is saved at offset 4 in the sh64_switch_to frame */
+ schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
+
+ /* and the caller of 'schedule' is (currently!) saved at offset 24
+ in the frame of schedule (from disasm) */
+ schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
+ return schedule_caller_pc;
+ }
+#endif
+ return pc;
+}
+
+/* Provide a /proc/asids file that lists out the
+ ASIDs currently associated with the processes. (If the DM.PC register is
+ examined through the debug link, this shows ASID + PC. To make use of this,
+ the PID->ASID relationship needs to be known. This is primarily for
+ debugging.)
+ */
+
+#if defined(CONFIG_SH64_PROC_ASIDS)
+static int
+asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
+{
+	int len = 0;
+ struct task_struct *p;
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ int pid = p->pid;
+
+ if (!pid)
+ continue;
+ if (p->mm)
+ len += sprintf(buf+len, "%5d : %02lx\n", pid,
+ asid_cache(smp_processor_id()));
+ else
+ len += sprintf(buf+len, "%5d : (none)\n", pid);
+ }
+ read_unlock(&tasklist_lock);
+ *eof = 1;
+ return len;
+}
+
+static int __init register_proc_asids(void)
+{
+ create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
+ return 0;
+}
+__initcall(register_proc_asids);
+#endif
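
copy_thread() above, and the signal code later in this patch, keep applying the same transformation to 32-bit values that become effective addresses: SH-5 implements fewer than 64 effective-address bits (NEFF), and the unimplemented upper bits must replicate the top implemented bit. As a standalone helper (a sketch assuming 32 implemented bits; the real NEFF_SIGN/NEFF_MASK come from the architecture headers):

	#define NEFF_SIGN_SK	(1ULL << 31)	/* top implemented address bit */
	#define NEFF_MASK_SK	(~0ULL << 32)	/* the unimplemented upper bits */

	static unsigned long long neff_sign_extend(unsigned long long va)
	{
		return (va & NEFF_SIGN_SK) ? (va | NEFF_MASK_SK) : va;
	}
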
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace_32.c
index ac725f0aeb7..ce0664a58b4 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -6,7 +6,7 @@
* edited by Linus Torvalds
*
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- *
+ * Audit support: Yuichi Nakamura <ynakam@hitachisoft.jp>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -19,6 +19,7 @@
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
+#include <linux/audit.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -248,15 +249,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-asmlinkage void do_syscall_trace(void)
+asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
struct task_struct *tsk = current;
+ if (unlikely(current->audit_context) && entryexit)
+ audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
+ regs->regs[0]);
+
if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
!test_thread_flag(TIF_SINGLESTEP))
- return;
+ goto out;
if (!(tsk->ptrace & PT_PTRACED))
- return;
+ goto out;
+
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
@@ -271,4 +277,11 @@ asmlinkage void do_syscall_trace(void)
send_sig(tsk->exit_code, tsk, 1);
tsk->exit_code = 0;
}
+
+out:
+ if (unlikely(current->audit_context) && !entryexit)
+ audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[3],
+ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
+
}
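
do_syscall_trace() is now called twice per system call, with entryexit telling the two sides apart, so one function hosts both the ptrace stop and the audit hooks: audit_syscall_exit() runs first on the way out, before the tracer can rewrite the return value, and audit_syscall_entry() runs last on the way in, after the tracer has had its chance to modify the arguments. The calling shape, modelled in C (a sketch; the real caller is the assembly syscall path):

	struct pt_regs;
	extern void do_syscall_trace(struct pt_regs *regs, int entryexit);

	static long traced_syscall(struct pt_regs *regs,
				   long (*handler)(struct pt_regs *))
	{
		long ret;

		do_syscall_trace(regs, 0);	/* entry: trace stop, then audit entry */
		ret = handler(regs);		/* the system call itself */
		do_syscall_trace(regs, 1);	/* exit: audit exit, then trace stop */
		return ret;
	}
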
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
new file mode 100644
index 00000000000..f6fbdfa6876
--- /dev/null
+++ b/arch/sh/kernel/ptrace_64.c
@@ -0,0 +1,341 @@
+/*
+ * arch/sh/kernel/ptrace_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * Started from SH3/4 version:
+ * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * Original x86 implementation:
+ * By Ross Biro 1/23/92
+ * edited by Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/audit.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+
+/* This mask defines the bits of the SR which the user is not allowed to
+ change, which are everything except S, Q, M, PR, SZ, FR. */
+#define SR_MASK (0xffff8cfd)
+
+/*
+ * This does not yet catch signals sent when the child dies;
+ * that is handled in exit.c and signal.c.
+ */
+
+/*
+ * This routine will get a word from the user area in the process kernel stack.
+ */
+static inline int get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)(task->thread.uregs);
+ stack += offset;
+ return (*((int *)stack));
+}
+
+static inline unsigned long
+get_fpu_long(struct task_struct *task, unsigned long addr)
+{
+ unsigned long tmp;
+ struct pt_regs *regs;
+ regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
+
+ if (!tsk_used_math(task)) {
+ if (addr == offsetof(struct user_fpu_struct, fpscr)) {
+ tmp = FPSCR_INIT;
+ } else {
+ tmp = 0xffffffffUL; /* matches initial value in fpu.c */
+ }
+ return tmp;
+ }
+
+ if (last_task_used_math == task) {
+ enable_fpu();
+ save_fpu(task, regs);
+ disable_fpu();
+ last_task_used_math = 0;
+ regs->sr |= SR_FD;
+ }
+
+ tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+ return tmp;
+}
+
+/*
+ * This routine will put a word into the user area in the process kernel stack.
+ */
+static inline int put_stack_long(struct task_struct *task, int offset,
+ unsigned long data)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)(task->thread.uregs);
+ stack += offset;
+ *(unsigned long *) stack = data;
+ return 0;
+}
+
+static inline int
+put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
+{
+ struct pt_regs *regs;
+
+ regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
+
+ if (!tsk_used_math(task)) {
+ fpinit(&task->thread.fpu.hard);
+ set_stopped_child_used_math(task);
+ } else if (last_task_used_math == task) {
+ enable_fpu();
+ save_fpu(task, regs);
+ disable_fpu();
+ last_task_used_math = 0;
+ regs->sr |= SR_FD;
+ }
+
+ ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+ return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+ int ret;
+
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
+
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0)
+ break;
+
+ if (addr < sizeof(struct pt_regs))
+ tmp = get_stack_long(child, addr);
+ else if ((addr >= offsetof(struct user, fpu)) &&
+ (addr < offsetof(struct user, u_fpvalid))) {
+ tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+ } else if (addr == offsetof(struct user, u_fpvalid)) {
+ tmp = !!tsk_used_math(child);
+ } else {
+ break;
+ }
+ ret = put_user(tmp, (unsigned long *)data);
+ break;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
+ break;
+
+ case PTRACE_POKEUSR:
+ /* write the word at location addr in the USER area. We must
+ disallow any changes to certain SR bits or u_fpvalid, since
+ this could crash the kernel or result in a security
+ loophole. */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0)
+ break;
+
+ if (addr < sizeof(struct pt_regs)) {
+ /* Ignore change of top 32 bits of SR */
+			if (addr == offsetof(struct pt_regs, sr) + 4) {
+				ret = 0;
+				break;
+			}
+			/* If lower 32 bits of SR, ignore non-user bits */
+			if (addr == offsetof(struct pt_regs, sr)) {
+				long cursr = get_stack_long(child, addr);
+
+				data &= ~(SR_MASK);
+				data |= (cursr & SR_MASK);
+			}
+			ret = put_stack_long(child, addr, data);
+		} else if ((addr >= offsetof(struct user, fpu)) &&
+ (addr < offsetof(struct user, u_fpvalid))) {
+ ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+ }
+ break;
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+ ret = -EIO;
+ if (!valid_signal(data))
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+/*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ ret = 0;
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ wake_up_process(child);
+ break;
+ }
+
+ case PTRACE_SINGLESTEP: { /* set the trap flag. */
+ struct pt_regs *regs;
+
+ ret = -EIO;
+ if (!valid_signal(data))
+ break;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ if ((child->ptrace & PT_DTRACE) == 0) {
+ /* Spurious delayed TF traps may occur */
+ child->ptrace |= PT_DTRACE;
+ }
+
+ regs = child->thread.uregs;
+
+ regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
+
+ child->exit_code = data;
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+ return ret;
+}
+
+asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+{
+#define WPC_DBRMODE 0x0d104008
+ static int first_call = 1;
+
+ lock_kernel();
+ if (first_call) {
+ /* Set WPC.DBRMODE to 0. This makes all debug events get
+ * delivered through RESVEC, i.e. into the handlers in entry.S.
+ * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
+ * would normally be left set to 1, which makes debug events get
+ * delivered through DBRVEC, i.e. into the remote gdb's
+ * handlers. This prevents ptrace getting them, and confuses
+ * the remote gdb.) */
+ printk("DBRMODE set to 0 to permit native debugging\n");
+ poke_real_address_q(WPC_DBRMODE, 0);
+ first_call = 0;
+ }
+ unlock_kernel();
+
+ return sys_ptrace(request, pid, addr, data);
+}
+
+asmlinkage void syscall_trace(struct pt_regs *regs, int entryexit)
+{
+ struct task_struct *tsk = current;
+
+ if (unlikely(current->audit_context) && entryexit)
+ audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
+ regs->regs[9]);
+
+ if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
+ !test_thread_flag(TIF_SINGLESTEP))
+ goto out;
+ if (!(tsk->ptrace & PT_PTRACED))
+ goto out;
+
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
+ !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (tsk->exit_code) {
+ send_sig(tsk->exit_code, tsk, 1);
+ tsk->exit_code = 0;
+ }
+
+out:
+ if (unlikely(current->audit_context) && !entryexit)
+ audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[1],
+ regs->regs[2], regs->regs[3],
+ regs->regs[4], regs->regs[5]);
+}
+
+/* Called with interrupts disabled */
+asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
+{
+ /* This is called after a single step exception (DEBUGSS).
+ There is no need to change the PC, as it is a post-execution
+ exception, as entry.S does not do anything to the PC for DEBUGSS.
+ We need to clear the Single Step setting in SR to avoid
+ continually stepping. */
+ local_irq_enable();
+ regs->sr &= ~SR_SSTEP;
+ force_sig(SIGTRAP, current);
+}
+
+/* Called with interrupts disabled */
+asmlinkage void do_software_break_point(unsigned long long vec,
+ struct pt_regs *regs)
+{
+ /* We need to forward step the PC, to counteract the backstep done
+ in signal.c. */
+ local_irq_enable();
+ force_sig(SIGTRAP, current);
+ regs->pc += 4;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* nothing to do.. */
+}
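
Both arch_ptrace() above and restore_sigcontext() in the signal code share one invariant: user space may only influence the S, Q, M, PR, SZ and FR bits of SR, and every bit covered by SR_MASK is re-filled from the current, kernel-owned value. The merge, as a pure function (a sketch, using the SR_MASK defined at the top of this file):

	#define SR_MASK 0xffff8cfdUL	/* bits user space must not change */

	/* Keep privileged bits from cur_sr, take user bits from new_sr. */
	static unsigned long merge_user_sr(unsigned long cur_sr,
					   unsigned long new_sr)
	{
		return (cur_sr & SR_MASK) | (new_sr & ~SR_MASK);
	}
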
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4156aac8c27..855cdf9d85b 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -26,6 +26,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
+#include <asm/elf.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
@@ -78,12 +79,25 @@ EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);
+int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
+
static int __init early_parse_mem(char *p)
{
unsigned long size;
- memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
+ memory_start = (unsigned long)__va(__MEMORY_START);
size = memparse(p, &p);
+
+ if (size > __MEMORY_SIZE) {
+ static char msg[] __initdata = KERN_ERR
+ "Using mem= to increase the size of kernel memory "
+ "is not allowed.\n"
+ " Recompile the kernel with the correct value for "
+ "CONFIG_MEMORY_SIZE.\n";
+ printk(msg);
+ return 0;
+ }
+
memory_end = memory_start + size;
return 0;
@@ -243,7 +257,7 @@ void __init setup_arch(char **cmdline_p)
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
- memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
+ memory_start = (unsigned long)__va(__MEMORY_START);
if (!memory_end)
memory_end = memory_start + __MEMORY_SIZE;
@@ -294,20 +308,23 @@ void __init setup_arch(char **cmdline_p)
}
static const char *cpu_name[] = {
+ [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
[CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
[CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
[CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
[CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
[CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
- [CPU_SH7729] = "SH7729", [CPU_SH7750] = "SH7750",
- [CPU_SH7750S] = "SH7750S", [CPU_SH7750R] = "SH7750R",
- [CPU_SH7751] = "SH7751", [CPU_SH7751R] = "SH7751R",
- [CPU_SH7760] = "SH7760",
+ [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
+ [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
+ [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
+ [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
[CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
- [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780",
- [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343",
- [CPU_SH7785] = "SH7785", [CPU_SH7722] = "SH7722",
- [CPU_SHX3] = "SH-X3", [CPU_SH_NONE] = "Unknown"
+ [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
+ [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
+ [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
+ [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
+ [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
+ [CPU_SH_NONE] = "Unknown"
};
const char *get_cpu_subtype(struct sh_cpuinfo *c)
@@ -410,7 +427,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
static void c_stop(struct seq_file *m, void *v)
{
}
-struct seq_operations cpuinfo_op = {
+const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
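
early_parse_mem() above leans on memparse() to accept the usual K/M/G suffixes in mem= arguments before the new size check is applied. For illustration, a minimal user-space re-implementation of that suffix handling (a sketch, not the kernel's memparse):

	#include <stdlib.h>

	/* Parse "65536K", "64M", "1G", ...; *retptr ends up past the suffix. */
	static unsigned long memparse_sketch(const char *ptr, char **retptr)
	{
		unsigned long ret = strtoul(ptr, retptr, 0);

		switch (**retptr) {
		case 'G': case 'g':
			ret <<= 10;	/* fall through */
		case 'M': case 'm':
			ret <<= 10;	/* fall through */
		case 'K': case 'k':
			ret <<= 10;
			(*retptr)++;
		}
		return ret;
	}
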
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms_32.c
index e1a6de9088b..e1a6de9088b 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
new file mode 100644
index 00000000000..8004c38d3d3
--- /dev/null
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -0,0 +1,55 @@
+/*
+ * arch/sh/kernel/sh_ksyms_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/rwsem.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/screen_info.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+
+extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
+
+/* platform dependent support */
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(kernel_thread);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_trylock);
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__put_user_asm_l);
+EXPORT_SYMBOL(__get_user_asm_l);
+EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+
+/* Ugh. These come in from libgcc.a at link time. */
+#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
+
+DECLARE_EXPORT(__sdivsi3);
+DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__udivsi3);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal_32.c
index ca754fd4243..f6b5fbfe75c 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal_32.c
@@ -507,24 +507,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
ctrl_inw(regs->pc - 4));
break;
}
-#ifdef CONFIG_GUSA
- } else {
- /* gUSA handling */
- preempt_disable();
-
- if (regs->regs[15] >= 0xc0000000) {
- int offset = (int)regs->regs[15];
-
- /* Reset stack pointer: clear critical region mark */
- regs->regs[15] = regs->regs[1];
- if (regs->pc < regs->regs[0])
- /* Go to rewind point #1 */
- regs->pc = regs->regs[0] + offset -
- instruction_size(ctrl_inw(regs->pc-4));
- }
-
- preempt_enable_no_resched();
-#endif
}
/* Set up the stack frame */
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
new file mode 100644
index 00000000000..80bde19d445
--- /dev/null
+++ b/arch/sh/kernel/signal_64.c
@@ -0,0 +1,751 @@
+/*
+ * arch/sh/kernel/signal_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#define REG_RET 9
+#define REG_ARG1 2
+#define REG_ARG2 3
+#define REG_ARG3 4
+#define REG_SP 15
+#define REG_PR 18
+#define REF_REG_RET regs->regs[REG_RET]
+#define REF_REG_SP regs->regs[REG_SP]
+#define DEREF_REG_PR regs->regs[REG_PR]
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+asmlinkage int
+sys_sigsuspend(old_sigset_t mask,
+ unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs * regs)
+{
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ REF_REG_RET = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ regs->pc += 4; /* because sys_sigreturn decrements the pc */
+ if (do_signal(regs, &saveset)) {
+ /* pc now points at signal handler. Need to decrement
+ it because entry.S will increment it. */
+ regs->pc -= 4;
+ return -EINTR;
+ }
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+ unsigned long r4, unsigned long r5, unsigned long r6,
+ unsigned long r7,
+ struct pt_regs * regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ REF_REG_RET = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ regs->pc += 4; /* because sys_sigreturn decrements the pc */
+ if (do_signal(regs, &saveset)) {
+ /* pc now points at signal handler. Need to decrement
+ it because entry.S will increment it. */
+ regs->pc -= 4;
+ return -EINTR;
+ }
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ unsigned long r4, unsigned long r5, unsigned long r6,
+ unsigned long r7,
+ struct pt_regs * regs)
+{
+ return do_sigaltstack(uss, uoss, REF_REG_SP);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct sigframe
+{
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ long long retcode[2];
+};
+
+struct rt_sigframe
+{
+ struct siginfo __user *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ long long retcode[2];
+};
+
+#ifdef CONFIG_SH_FPU
+static inline int
+restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int err = 0;
+ int fpvalid;
+
+ err |= __get_user (fpvalid, &sc->sc_fpvalid);
+ conditional_used_math(fpvalid);
+ if (! fpvalid)
+ return err;
+
+ if (current == last_task_used_math) {
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+ (sizeof(long long) * 32) + (sizeof(int) * 1));
+
+ return err;
+}
+
+static inline int
+setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int err = 0;
+ int fpvalid;
+
+ fpvalid = !!used_math();
+ err |= __put_user(fpvalid, &sc->sc_fpvalid);
+ if (! fpvalid)
+ return err;
+
+ if (current == last_task_used_math) {
+ enable_fpu();
+ save_fpu(current, regs);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+ (sizeof(long long) * 32) + (sizeof(int) * 1));
+ clear_used_math();
+
+ return err;
+}
+#else
+static inline int
+restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ return 0;
+}
+static inline int
+setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ return 0;
+}
+#endif
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
+{
+ unsigned int err = 0;
+ unsigned long long current_sr, new_sr;
+#define SR_MASK 0xffff8cfd
+
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+
+ COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
+ COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
+ COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
+ COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
+ COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
+ COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
+ COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
+ COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
+ COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
+ COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
+ COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
+ COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
+ COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
+ COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
+ COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
+ COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
+ COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
+ COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
+
+ /* Prevent the signal handler manipulating SR in a way that can
+ crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
+ modified */
+ current_sr = regs->sr;
+ err |= __get_user(new_sr, &sc->sc_sr);
+ regs->sr &= SR_MASK;
+ regs->sr |= (new_sr & ~SR_MASK);
+
+ COPY(pc);
+
+#undef COPY
+
+ /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
+ * has been restored above.) */
+ err |= restore_sigcontext_fpu(regs, sc);
+
+ regs->syscall_nr = -1; /* disable syscall checks */
+ err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
+ return err;
+}
+
+asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs * regs)
+{
+ struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
+ sigset_t set;
+ long long ret;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->sc, &ret))
+ goto badframe;
+ regs->pc -= 4;
+
+ return (int) ret;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs * regs)
+{
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
+ sigset_t set;
+	stack_t st;
+ long long ret;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
+ goto badframe;
+ regs->pc -= 4;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, REF_REG_SP);
+
+ return (int) ret;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+	/* Do this first; otherwise, if this sets sr->fd, that value isn't preserved. */
+ err |= setup_sigcontext_fpu(regs, sc);
+
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+
+ COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
+ COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
+ COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
+ COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
+ COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
+ COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
+ COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
+ COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
+ COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
+ COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
+ COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
+ COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
+ COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
+ COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
+ COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
+ COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
+ COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
+ COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
+ COPY(sr); COPY(pc);
+
+#undef COPY
+
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ return (void __user *)((sp - frame_size) & -8ul);
+}
+
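
get_sigframe() above carves the frame out of the chosen stack by growing downwards and rounding to an 8-byte boundary; & -8ul works because -8 in two's complement is a mask with the low three bits clear. The same computation as a named helper (sketch):

	/* Reserve size bytes below sp, 8-byte aligned, growing downwards. */
	static unsigned long carve_frame(unsigned long sp, unsigned long size)
	{
		return (sp - size) & ~7ul;	/* identical to & -8ul */
	}
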
+void sa_default_restorer(void); /* See comments below */
+void sa_default_rt_restorer(void); /* See comments below */
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe __user *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+	if (_NSIG_WORDS > 1) {
+		err |= __copy_to_user(frame->extramask, &set->sig[1],
+				      sizeof(frame->extramask));
+	}
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
+
+ /*
+ * On SH5 all edited pointers are subject to NEFF
+ */
+ DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+ (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ } else {
+ /*
+		 * Different approach on SH5:
+		 *  - endianness-independent asm code is placed in entry.S,
+		 *    limited to four instructions (two long longs in size);
+		 *  - error checking is done on this else branch only;
+		 *  - flush_icache_range() is called upon __put_user() only;
+		 *  - all edited pointers are subject to NEFF;
+		 *  - being code, the linker turns the SHmedia bit on, so
+		 *    always dereference index -1.
+ */
+ DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
+ DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+ (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+
+ if (__copy_to_user(frame->retcode,
+ (unsigned long long)sa_default_restorer & (~1), 16) != 0)
+ goto give_sigsegv;
+
+ /* Cohere the trampoline with the I-cache. */
+ flush_cache_sigtramp(DEREF_REG_PR-1);
+ }
+
+ /*
+ * Set up registers for signal handler.
+ * All edited pointers are subject to NEFF.
+ */
+ regs->regs[REG_SP] = (unsigned long) frame;
+ regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
+ (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+ regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+
+ /* FIXME:
+ The glibc profiling support for SH-5 needs to be passed a sigcontext
+ so it can retrieve the PC. At some point during 2003 the glibc
+ support was changed to receive the sigcontext through the 2nd
+ argument, but there are still versions of libc.so in use that use
+ the 3rd argument. Until libc.so is stabilised, pass the sigcontext
+ through both 2nd and 3rd arguments.
+ */
+
+ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
+ regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
+
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+ regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ /* Broken %016Lx */
+ printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+ signal,
+ current->comm, current->pid, frame,
+ regs->pc >> 32, regs->pc & 0xffffffff,
+ DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
+
+ /*
+ * On SH5 all edited pointers are subject to NEFF
+ */
+ DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+ (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ } else {
+ /*
+		 * Different approach on SH5:
+		 *  - endianness-independent asm code is placed in entry.S,
+		 *    limited to four instructions (two long longs in size);
+		 *  - error checking is done on this else branch only;
+		 *  - flush_icache_range() is called upon __put_user() only;
+		 *  - all edited pointers are subject to NEFF;
+		 *  - being code, the linker turns the SHmedia bit on, so
+		 *    always dereference index -1.
+ */
+
+ DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
+ DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+ (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+
+ if (__copy_to_user(frame->retcode,
+ (unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
+ goto give_sigsegv;
+
+ flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
+ }
+
+ /*
+ * Set up registers for signal handler.
+ * All edited pointers are subject to NEFF.
+ */
+ regs->regs[REG_SP] = (unsigned long) frame;
+ regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
+ (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+ regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
+ regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+ regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ /* %016Lx is broken, so print the halves with two %08Lx */
+ printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+ signal,
+ current->comm, current->pid, frame,
+ regs->pc >> 32, regs->pc & 0xffffffff,
+ DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Are we from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* If so, check system call restarting.. */
+ switch (regs->regs[REG_RET]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->regs[REG_RET] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->regs[REG_RET] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ /* Decode syscall # */
+ regs->regs[REG_RET] = regs->syscall_nr;
+ regs->pc -= 4;
+ }
+ }
+
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+}
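
The syscall-restart switch at the top of handle_signal() is compact;
restated on its own it is a small decision table. The sketch below is
purely illustrative: the restart codes are the kernel-internal values
from <linux/errno.h>, and DO_RESTART is a made-up marker meaning
"reload syscall_nr and back the PC up over the trap instruction".

    /* Kernel-internal restart codes (values as in <linux/errno.h>),
     * restated so the sketch stands alone. */
    #define EINTR                   4
    #define ERESTARTSYS             512
    #define ERESTARTNOINTR          513
    #define ERESTARTNOHAND          514
    #define ERESTART_RESTARTBLOCK   516

    #define DO_RESTART              1       /* made-up marker */

    /* Decide what the interrupted syscall should now return, or
     * whether to re-issue it. */
    static long restart_policy(long ret, int sa_restart)
    {
            switch (ret) {
            case -ERESTART_RESTARTBLOCK:
            case -ERESTARTNOHAND:
                    return -EINTR;          /* never auto-restarted here */
            case -ERESTARTSYS:
                    if (!sa_restart)
                            return -EINTR;  /* handler lacks SA_RESTART */
                    /* fallthrough */
            case -ERESTARTNOINTR:
                    return DO_RESTART;      /* syscall_nr reload, pc -= 4 */
            default:
                    return ret;             /* not a restart case */
            }
    }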
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 1;
+
+ if (try_to_freeze())
+ goto no_signal;
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, 0);
+
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, oldset, regs);
+
+ /*
+ * If a signal was successfully delivered, the saved sigmask
+ * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
+ * flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ return 1;
+ }
+
+no_signal:
+ /* Did we come from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* Restart the system call - no handlers present */
+ switch (regs->regs[REG_RET]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ /* Decode Syscall # */
+ regs->regs[REG_RET] = regs->syscall_nr;
+ regs->pc -= 4;
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->regs[REG_RET] = __NR_restart_syscall;
+ regs->pc -= 4;
+ break;
+ }
+ }
+
+ /* No signal to deliver -- put the saved sigmask back */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+
+ return 0;
+}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index d545a686a20..59cd2859ce9 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -7,7 +7,6 @@
*
* Taken from i386 version.
*/
-
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -27,28 +26,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way Unix traditionally does this, though.
- */
-asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- int fd[2];
- int error;
-
- error = do_pipe(fd);
- if (!error) {
- regs->regs[1] = fd[1];
- return fd[0];
- }
- return error;
-}
-
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-
EXPORT_SYMBOL(shm_align_mask);
#ifdef CONFIG_MMU
@@ -140,7 +118,7 @@ full_search:
#endif /* CONFIG_MMU */
static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, int fd, unsigned long pgoff)
{
int error = -EBADF;
@@ -195,12 +173,13 @@ asmlinkage int sys_ipc(uint call, int first, int second,
if (call <= SEMCTL)
switch (call) {
case SEMOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
+ return sys_semtimedop(first,
+ (struct sembuf __user *)ptr,
second, NULL);
case SEMTIMEDOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- second,
- (const struct timespec __user *)fifth);
+ return sys_semtimedop(first,
+ (struct sembuf __user *)ptr, second,
+ (const struct timespec __user *)fifth);
case SEMGET:
return sys_semget (first, second, third);
case SEMCTL: {
@@ -215,25 +194,28 @@ asmlinkage int sys_ipc(uint call, int first, int second,
return -EINVAL;
}
- if (call <= MSGCTL)
+ if (call <= MSGCTL)
switch (call) {
case MSGSND:
- return sys_msgsnd (first, (struct msgbuf __user *) ptr,
+ return sys_msgsnd (first, (struct msgbuf __user *) ptr,
second, third);
case MSGRCV:
switch (version) {
- case 0: {
+ case 0:
+ {
struct ipc_kludge tmp;
+
if (!ptr)
return -EINVAL;
-
+
if (copy_from_user(&tmp,
- (struct ipc_kludge __user *) ptr,
+ (struct ipc_kludge __user *) ptr,
sizeof (tmp)))
return -EFAULT;
+
return sys_msgrcv (first, tmp.msgp, second,
tmp.msgtyp, third);
- }
+ }
default:
return sys_msgrcv (first,
(struct msgbuf __user *) ptr,
@@ -247,7 +229,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
default:
return -EINVAL;
}
- if (call <= SHMCTL)
+ if (call <= SHMCTL)
switch (call) {
case SHMAT:
switch (version) {
@@ -265,7 +247,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
return do_shmat (first, (char __user *) ptr,
second, (ulong *) third);
}
- case SHMDT:
+ case SHMDT:
return sys_shmdt ((char __user *)ptr);
case SHMGET:
return sys_shmget (first, second, third);
@@ -275,7 +257,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
default:
return -EINVAL;
}
-
+
return -EINVAL;
}
@@ -289,49 +271,3 @@ asmlinkage int sys_uname(struct old_utsname * name)
up_read(&uts_sem);
return err?-EFAULT:0;
}
-
-asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
- size_t count, long dummy, loff_t pos)
-{
- return sys_pread64(fd, buf, count, pos);
-}
-
-asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
- size_t count, long dummy, loff_t pos)
-{
- return sys_pwrite64(fd, buf, count, pos);
-}
-
-asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
- u32 len0, u32 len1, int advice)
-{
-#ifdef __LITTLE_ENDIAN__
- return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
- (u64)len1 << 32 | len0, advice);
-#else
- return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
- (u64)len0 << 32 | len1, advice);
-#endif
-}
-
-#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
-#define SYSCALL_ARG3 "trapa #0x23"
-#else
-#define SYSCALL_ARG3 "trapa #0x13"
-#endif
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
- register long __sc0 __asm__ ("r3") = __NR_execve;
- register long __sc4 __asm__ ("r4") = (long) filename;
- register long __sc5 __asm__ ("r5") = (long) argv;
- register long __sc6 __asm__ ("r6") = (long) envp;
- __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
- : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
- : "memory");
- return __sc0;
-}
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
new file mode 100644
index 00000000000..125e493ead8
--- /dev/null
+++ b/arch/sh/kernel/sys_sh32.c
@@ -0,0 +1,84 @@
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/ipc.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ regs->regs[1] = fd[1];
+ return fd[0];
+ }
+ return error;
+}
+
+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
+ size_t count, long dummy, loff_t pos)
+{
+ return sys_pread64(fd, buf, count, pos);
+}
+
+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
+ size_t count, long dummy, loff_t pos)
+{
+ return sys_pwrite64(fd, buf, count, pos);
+}
+
+asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+ u32 len0, u32 len1, int advice)
+{
+#ifdef __LITTLE_ENDIAN__
+ return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
+ (u64)len1 << 32 | len0, advice);
+#else
+ return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
+ (u64)len0 << 32 | len1, advice);
+#endif
+}
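
The wrapper above reassembles 64-bit offset and length values that
userspace passed as 32-bit register pairs, with the pair order
depending on byte order. A standalone illustration of the reassembly
(the numeric halves are invented):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Invented halves of a 64-bit offset, as they would
             * arrive in two 32-bit registers. */
            uint32_t lo = 0x89abcdef, hi = 0x01234567;
            uint64_t off = (uint64_t)hi << 32 | lo;

            /* Prints 0x0123456789abcdef */
            printf("reassembled: 0x%016llx\n", (unsigned long long)off);
            return 0;
    }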
+
+#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
+#define SYSCALL_ARG3 "trapa #0x23"
+#else
+#define SYSCALL_ARG3 "trapa #0x13"
+#endif
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
+ register long __sc0 __asm__ ("r3") = __NR_execve;
+ register long __sc4 __asm__ ("r4") = (long) filename;
+ register long __sc5 __asm__ ("r5") = (long) argv;
+ register long __sc6 __asm__ ("r6") = (long) envp;
+ __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
+ : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
+ : "memory");
+ return __sc0;
+}
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
new file mode 100644
index 00000000000..578004d71e0
--- /dev/null
+++ b/arch/sh/kernel/sys_sh64.c
@@ -0,0 +1,66 @@
+/*
+ * arch/sh/kernel/sys_sh64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/SH5
+ * platform.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/syscalls.h>
+#include <linux/ipc.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
+ register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
+ register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
+ register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
+ register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
+ __asm__ __volatile__ ("trapa %1 !\t\t\t execve(%2,%3,%4)"
+ : "=r" (__sc0)
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
+ __asm__ __volatile__ ("!dummy %0 %1 %2 %3"
+ : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
+ return __sc0;
+}
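
Unlike the 32-bit variant, which selects the calling convention via the
trapa immediate, the SH-5 version packs a trap class into the high half
of r9 alongside the syscall number. Assuming the historical
__NR_execve value of 11 (an assumption, not stated in the patch), the
packing works out as follows:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed: __NR_execve == 11, as on the classic table. */
            unsigned long r9 = (0x13 << 16) | 11;

            printf("r9 = 0x%08lx\n", r9);   /* 0x0013000b */
            return 0;
    }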
diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls_32.S
index 10bec45415b..10bec45415b 100644
--- a/arch/sh/kernel/syscalls.S
+++ b/arch/sh/kernel/syscalls_32.S
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
new file mode 100644
index 00000000000..98a93efe369
--- /dev/null
+++ b/arch/sh/kernel/syscalls_64.S
@@ -0,0 +1,381 @@
+/*
+ * arch/sh/kernel/syscalls_64.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+
+ .section .data, "aw"
+ .balign 32
+
+/*
+ * System calls jump table
+ */
+ .globl sys_call_table
+sys_call_table:
+ .long sys_restart_syscall /* 0 - old "setup()" system call */
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sh64_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall /* sys_olduname */
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long sys_ni_syscall /* sys_oldselect */
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm */
+ .long sys_socketcall /* Obsolete implementation of socket syscall */
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_uname
+ .long sys_ni_syscall /* 110 */ /* iopl */
+ .long sys_vhangup
+ .long sys_ni_syscall /* idle */
+ .long sys_ni_syscall /* vm86old */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc /* Obsolete ipc syscall implementation */
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_ni_syscall /* vm86 */
+ .long sys_ni_syscall /* old "query_module" */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* streams1 */
+ .long sys_ni_syscall /* streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ /* Broken-out socket family (maintain backwards compatibility in syscall
+ numbering with 2.4) */
+ .long sys_socket /* 220 */
+ .long sys_bind
+ .long sys_connect
+ .long sys_listen
+ .long sys_accept
+ .long sys_getsockname /* 225 */
+ .long sys_getpeername
+ .long sys_socketpair
+ .long sys_send
+ .long sys_sendto
+ .long sys_recv /* 230 */
+ .long sys_recvfrom
+ .long sys_shutdown
+ .long sys_setsockopt
+ .long sys_getsockopt
+ .long sys_sendmsg /* 235 */
+ .long sys_recvmsg
+ /* Broken-out IPC family (maintain backwards compatibility in syscall
+ numbering with 2.4) */
+ .long sys_semop
+ .long sys_semget
+ .long sys_semctl
+ .long sys_msgsnd /* 240 */
+ .long sys_msgrcv
+ .long sys_msgget
+ .long sys_msgctl
+ .long sys_shmat
+ .long sys_shmdt /* 245 */
+ .long sys_shmget
+ .long sys_shmctl
+ /* Rest of syscalls listed in 2.4 i386 unistd.h */
+ .long sys_getdents64
+ .long sys_fcntl64
+ .long sys_ni_syscall /* 250 reserved for TUX */
+ .long sys_ni_syscall /* Reserved for Security */
+ .long sys_gettid
+ .long sys_readahead
+ .long sys_setxattr
+ .long sys_lsetxattr /* 255 */
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr /* 260 */
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr /* 265 */
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity /* 270 */
+ .long sys_ni_syscall
+ .long sys_ni_syscall
+ .long sys_io_setup
+ .long sys_io_destroy
+ .long sys_io_getevents /* 275 */
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64
+ .long sys_ni_syscall
+ .long sys_exit_group /* 280 */
+ /* Rest of new 2.6 syscalls */
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl
+ .long sys_epoll_wait
+ .long sys_remap_file_pages /* 285 */
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun /* 290 */
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime
+ .long sys_clock_getres
+ .long sys_clock_nanosleep /* 295 */
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill
+ .long sys_utimes
+ .long sys_fadvise64_64 /* 300 */
+ .long sys_ni_syscall /* Reserved for vserver */
+ .long sys_ni_syscall /* Reserved for mbind */
+ .long sys_ni_syscall /* get_mempolicy */
+ .long sys_ni_syscall /* set_mempolicy */
+ .long sys_mq_open /* 305 */
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive
+ .long sys_mq_notify
+ .long sys_mq_getsetattr /* 310 */
+ .long sys_ni_syscall /* Reserved for kexec */
+ .long sys_waitid
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl /* 315 */
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch /* 320 */
+ .long sys_ni_syscall
+ .long sys_migrate_pages
+ .long sys_openat
+ .long sys_mkdirat
+ .long sys_mknodat /* 325 */
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64
+ .long sys_unlinkat
+ .long sys_renameat /* 330 */
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat
+ .long sys_fchmodat
+ .long sys_faccessat /* 335 */
+ .long sys_pselect6
+ .long sys_ppoll
+ .long sys_unshare
+ .long sys_set_robust_list
+ .long sys_get_robust_list /* 340 */
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee
+ .long sys_vmsplice
+ .long sys_move_pages /* 345 */
+ .long sys_getcpu
+ .long sys_epoll_pwait
+ .long sys_utimensat
+ .long sys_signalfd
+ .long sys_timerfd /* 350 */
+ .long sys_eventfd
+ .long sys_fallocate
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time_32.c
index a3a67d151e5..2bc04bfee73 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time_32.c
@@ -174,7 +174,7 @@ int timer_resume(struct sys_device *dev)
#endif
static struct sysdev_class timer_sysclass = {
- set_kset_name("timer"),
+ .name = "timer",
.suspend = timer_suspend,
.resume = timer_resume,
};
diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
new file mode 100644
index 00000000000..f819ba38a6c
--- /dev/null
+++ b/arch/sh/kernel/time_64.c
@@ -0,0 +1,519 @@
+/*
+ * arch/sh/kernel/time_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ * Copyright (C) 2003 Richard Curnow
+ *
+ * Original TMU/RTC code taken from sh version.
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Some code taken from i386 version.
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/bcd.h>
+#include <linux/timex.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <asm/cpu/registers.h> /* required by inline __asm__ stmt. */
+#include <asm/cpu/irq.h>
+#include <asm/addrspace.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/delay.h>
+
+#define TMU_TOCR_INIT 0x00
+#define TMU0_TCR_INIT 0x0020
+#define TMU_TSTR_INIT 1
+#define TMU_TSTR_OFF 0
+
+/* Real Time Clock */
+#define RTC_BLOCK_OFF 0x01040000
+#define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
+#define RTC_RCR1_CIE 0x10 /* Carry Interrupt Enable */
+#define RTC_RCR1 (rtc_base + 0x38)
+
+/* Clock, Power and Reset Controller */
+#define CPRC_BLOCK_OFF 0x01010000
+#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
+
+#define FRQCR (cprc_base+0x0)
+#define WTCSR (cprc_base+0x0018)
+#define STBCR (cprc_base+0x0030)
+
+/* Time Management Unit */
+#define TMU_BLOCK_OFF 0x01020000
+#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
+#define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0)
+#define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1)
+#define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2)
+
+#define TMU_TOCR tmu_base+0x0 /* Byte access */
+#define TMU_TSTR tmu_base+0x4 /* Byte access */
+
+#define TMU0_TCOR TMU0_BASE+0x0 /* Long access */
+#define TMU0_TCNT TMU0_BASE+0x4 /* Long access */
+#define TMU0_TCR TMU0_BASE+0x8 /* Word access */
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+static unsigned long tmu_base, rtc_base;
+unsigned long cprc_base;
+
+/* Variables to allow interpolation of time of day to resolution better than a
+ * jiffy. */
+
+/* This is effectively protected by xtime_lock */
+static unsigned long ctc_last_interrupt;
+static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
+
+#define CTC_JIFFY_SCALE_SHIFT 40
+
+/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
+static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
+
+/* Estimate number of microseconds that have elapsed since the last timer tick,
+ by scaling the delta that has occurred in the CTC register.
+
+ WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
+ the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
+ in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
+ probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
+ sleeping, though will be coarser.
+
+ FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
+ is running or if the freq or tick arguments of adjtimex are modified after
+ we have calibrated the scaling factor? This will result in either a jump at
+ the end of a tick period, or a wrap backwards at the start of the next one,
+ if the application is reading the time of day often enough. I think we
+ ought to do better than this. For this reason, usecs_per_jiffy is left
+ separated out in the calculation below. This allows some future hook into
+ the adjtime-related stuff in kernel/timer.c to remove this hazard.
+
+*/
+
+static unsigned long usecs_since_tick(void)
+{
+ unsigned long long current_ctc;
+ long ctc_ticks_since_interrupt;
+ unsigned long long ull_ctc_ticks_since_interrupt;
+ unsigned long result;
+
+ unsigned long long mul1_out;
+ unsigned long long mul1_out_high;
+ unsigned long long mul2_out_low, mul2_out_high;
+
+ /* Read CTC register */
+ asm ("getcon cr62, %0" : "=r" (current_ctc));
+ /* Note, the CTC counts down on each CPU clock, not up.
+ Note(2), use long type to get correct wraparound arithmetic when
+ the counter crosses zero. */
+ ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
+ ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
+
+ /* Inline assembly to do 32x32x32->64 multiplier */
+ asm volatile ("mulu.l %1, %2, %0" :
+ "=r" (mul1_out) :
+ "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
+
+ mul1_out_high = mul1_out >> 32;
+
+ asm volatile ("mulu.l %1, %2, %0" :
+ "=r" (mul2_out_low) :
+ "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
+
+#if 1
+ asm volatile ("mulu.l %1, %2, %0" :
+ "=r" (mul2_out_high) :
+ "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
+#endif
+
+ result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
+
+ return result;
+}
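
Stripped of the mulu.l splitting, the computation above is
delta_ticks * usecs_per_jiffy * (2^40 / ctc_ticks_per_jiffy) >> 40.
A plain-C equivalent with assumed figures (400 MHz CPU clock, HZ=100,
hence 4,000,000 CTC ticks per jiffy; none of these numbers come from
the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long ticks = 2000000;     /* half a jiffy */
            unsigned long long usecs_per_jiffy = 1000000 / 100;
            unsigned long long recip = (1ULL << 40) / 4000000;

            /* Prints 4999 (~5000 us; the reciprocal rounds down). */
            printf("%llu us\n", (ticks * usecs_per_jiffy * recip) >> 40);
            return 0;
    }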
+
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ unsigned long seq;
+ unsigned long usec, sec;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ usec = usecs_since_tick();
+ sec = xtime.tv_sec;
+ usec += xtime.tv_nsec / 1000;
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the
+ * value in this location is the value at the most recent update of
+ * wall time. Discover what correction gettimeofday() would have
+ * made, and then undo it!
+ */
+ nsec -= 1000 * usecs_since_tick();
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ ntp_clear();
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+
+ return 0;
+}
+EXPORT_SYMBOL(do_settimeofday);
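
The "revolting" correction above subtracts the interpolated
microseconds so that a later do_gettimeofday(), which adds them back,
reads exactly the value that was set. A toy check of that round trip
(the 3000 us of interpolation is an invented figure):

    #include <stdio.h>

    int main(void)
    {
            long requested_nsec = 0;        /* caller sets an even second */
            long since_tick_us = 3000;      /* invented interpolation */
            long stored = requested_nsec - 1000 * since_tick_us;

            /* A later gettimeofday() adds the interpolation back: */
            printf("read back: %ld ns\n", stored + 1000 * since_tick_us);
            return 0;       /* prints 0, exactly what was set */
    }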
+
+/* Dummy RTC ops */
+static void null_rtc_get_time(struct timespec *tv)
+{
+ tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
+ tv->tv_nsec = 0;
+}
+
+static int null_rtc_set_time(const time_t secs)
+{
+ return 0;
+}
+
+void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
+int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
+
+/* last time the RTC clock got updated */
+static long last_rtc_update;
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void do_timer_interrupt(void)
+{
+ unsigned long long current_ctc;
+ asm ("getcon cr62, %0" : "=r" (current_ctc));
+ ctc_last_interrupt = (unsigned long) current_ctc;
+
+ do_timer(1);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+#endif
+ if (current->pid)
+ profile_tick(CPU_PROFILING);
+
+#ifdef CONFIG_HEARTBEAT
+ if (sh_mv.mv_heartbeat != NULL)
+ sh_mv.mv_heartbeat();
+#endif
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if (ntp_synced() &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
+ (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+ if (rtc_sh_set_time(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ /* do it again in 60 s */
+ last_rtc_update = xtime.tv_sec - 600;
+ }
+}
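
The RTC write-back condition aims at the half-second point before a
second boundary, widened by half a tick on either side. With an
assumed HZ of 100 (so TICK_SIZE is roughly 10000 us), the accepted
window works out to [495000, 505000] us into the second:

    #include <stdio.h>

    int main(void)
    {
            unsigned tick_size = 10000;     /* assumed: HZ=100 */

            printf("window: [%u, %u] us\n",
                   500000 - tick_size / 2, 500000 + tick_size / 2);
            return 0;
    }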
+
+/*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ unsigned long timer_status;
+
+ /* Clear UNF bit */
+ timer_status = ctrl_inw(TMU0_TCR);
+ timer_status &= ~0x100;
+ ctrl_outw(timer_status, TMU0_TCR);
+
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because, as just said, we have irqs
+ * locally disabled. -arca
+ */
+ write_lock(&xtime_lock);
+ do_timer_interrupt();
+ write_unlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+}
+
+
+static __init unsigned int get_cpu_hz(void)
+{
+ unsigned int count;
+ unsigned long __dummy;
+ unsigned long ctc_val_init, ctc_val;
+
+ /*
+ ** Regardless of the toolchain, force the compiler to use the
+ ** arbitrary register r3 as a clock tick counter.
+ ** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
+ */
+ register unsigned long long __rtc_irq_flag __asm__ ("r3");
+
+ local_irq_enable();
+ do {} while (ctrl_inb(rtc_base) != 0);
+ ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */
+
+ /*
+ * r3 is arbitrary. CDC does not support "=z".
+ */
+ ctc_val_init = 0xffffffff;
+ ctc_val = ctc_val_init;
+
+ asm volatile("gettr tr0, %1\n\t"
+ "putcon %0, " __CTC "\n\t"
+ "and %2, r63, %2\n\t"
+ "pta $+4, tr0\n\t"
+ "beq/l %2, r63, tr0\n\t"
+ "ptabs %1, tr0\n\t"
+ "getcon " __CTC ", %0\n\t"
+ : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
+ : "0" (0));
+ local_irq_disable();
+ /*
+ * SH-3:
+ * CPU clock = 4 stages * loop
+ * tst rm,rm if id ex
+ * bt/s 1b if id ex
+ * add #1,rd if id ex
+ * (if) pipe line stole
+ * tst rm,rm if id ex
+ * ....
+ *
+ *
+ * SH-4:
+ * CPU clock = 6 stages * loop
+ * I don't know why.
+ * ....
+ *
+ * SH-5:
+ * Use CTC register to count. This approach returns the right value
+ * even if the I-cache is disabled (e.g. whilst debugging.)
+ *
+ */
+
+ count = ctc_val_init - ctc_val; /* CTC counts down */
+
+ /*
+ * Scale the cycle count by the ratio between a complete R64CNT
+ * wrap-around (128) and the point at which the carry (CUI)
+ * interrupt is raised (64): the loop above spanned half that
+ * period, so count*2 is the clock rate in Hz.
+ */
+ return count*2;
+}
+
+static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ ctrl_outb(0, RTC_RCR1); /* Disable Carry Interrupts */
+ regs->regs[3] = 1; /* Using r3 */
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq0 = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .mask = CPU_MASK_NONE,
+ .name = "timer",
+};
+static struct irqaction irq1 = {
+ .handler = sh64_rtc_interrupt,
+ .flags = IRQF_DISABLED,
+ .mask = CPU_MASK_NONE,
+ .name = "rtc",
+};
+
+void __init time_init(void)
+{
+ unsigned int cpu_clock, master_clock, bus_clock, module_clock;
+ unsigned long interval;
+ unsigned long frqcr, ifc, pfc;
+ static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
+#define bfc_table ifc_table /* Same */
+#define pfc_table ifc_table /* Same */
+
+ tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
+ if (!tmu_base) {
+ panic("Unable to remap TMU\n");
+ }
+
+ rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
+ if (!rtc_base) {
+ panic("Unable to remap RTC\n");
+ }
+
+ cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
+ if (!cprc_base) {
+ panic("Unable to remap CPRC\n");
+ }
+
+ rtc_sh_get_time(&xtime);
+
+ setup_irq(TIMER_IRQ, &irq0);
+ setup_irq(RTC_IRQ, &irq1);
+
+ /* Check how fast it is.. */
+ cpu_clock = get_cpu_hz();
+
+ /* Note careful order of operations to maintain reasonable precision and avoid overflow. */
+ scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
+
+ free_irq(RTC_IRQ, NULL);
+
+ printk("CPU clock: %d.%02dMHz\n",
+ (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
+ {
+ unsigned short bfc;
+ frqcr = ctrl_inl(FRQCR);
+ ifc = ifc_table[(frqcr>> 6) & 0x0007];
+ bfc = bfc_table[(frqcr>> 3) & 0x0007];
+ pfc = pfc_table[(frqcr>> 12) & 0x0007];
+ master_clock = cpu_clock * ifc;
+ bus_clock = master_clock/bfc;
+ }
+
+ printk("Bus clock: %d.%02dMHz\n",
+ (bus_clock/1000000), (bus_clock % 1000000)/10000);
+ module_clock = master_clock/pfc;
+ printk("Module clock: %d.%02dMHz\n",
+ (module_clock/1000000), (module_clock % 1000000)/10000);
+ interval = (module_clock/(HZ*4));
+
+ printk("Interval = %ld\n", interval);
+
+ current_cpu_data.cpu_clock = cpu_clock;
+ current_cpu_data.master_clock = master_clock;
+ current_cpu_data.bus_clock = bus_clock;
+ current_cpu_data.module_clock = module_clock;
+
+ /* Start TMU0 */
+ ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
+ ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
+ ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
+ ctrl_outl(interval, TMU0_TCOR);
+ ctrl_outl(interval, TMU0_TCNT);
+ ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
+}
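
time_init() programs TMU0 to underflow once per jiffy. The /(HZ*4) in
the interval computation matches a peripheral-clock/4 prescaler, which
is my reading of the TMU0_TCR_INIT value 0x0020 (prescaler bits zero,
underflow interrupt enabled). With an assumed 50 MHz module clock:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed: 50 MHz module clock, HZ=100, input = Pphi/4. */
            unsigned long module_clock = 50000000;
            unsigned long interval = module_clock / (100 * 4);

            printf("interval = %lu\n", interval);   /* 125000 */
            return 0;
    }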
+
+void enter_deep_standby(void)
+{
+ /* Disable watchdog timer */
+ ctrl_outl(0xa5000000, WTCSR);
+ /* Configure deep standby on sleep */
+ ctrl_outl(0x03, STBCR);
+
+#ifdef CONFIG_SH_ALPHANUMERIC
+ {
+ extern void mach_alphanum(int position, unsigned char value);
+ extern void mach_alphanum_brightness(int setting);
+ char halted[] = "Halted. ";
+ int i;
+ mach_alphanum_brightness(6); /* dimmest setting above off */
+ for (i=0; i<8; i++) {
+ mach_alphanum(i, halted[i]);
+ }
+ asm __volatile__ ("synco");
+ }
+#endif
+
+ asm __volatile__ ("sleep");
+ asm __volatile__ ("synci");
+ asm __volatile__ ("nop");
+ asm __volatile__ ("nop");
+ asm __volatile__ ("nop");
+ asm __volatile__ ("nop");
+ panic("Unexpected wakeup!\n");
+}
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ /* RTC base, filled in by rtc_init */
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = IRQ_PRI,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = IRQ_CUI,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = IRQ_ATI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static int __init rtc_init(void)
+{
+ rtc_resources[0].start = rtc_base;
+ rtc_resources[0].end = rtc_resources[0].start + 0x58 - 1;
+
+ return platform_device_register(&rtc_device);
+}
+device_initcall(rtc_init);
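
Registering the "sh-rtc" platform device publishes the RTC base and
IRQs for whatever driver binds to that name. The driver side is not
part of this patch; a rough sketch, with sh_rtc_probe as a
hypothetical probe routine, would be:

    /* Sketch of the matching driver; binding happens purely on the
     * "sh-rtc" name string. sh_rtc_probe is hypothetical. */
    static struct platform_driver sh_rtc_driver = {
            .probe  = sh_rtc_probe,
            .driver = {
                    .name = "sh-rtc",
            },
    };

    /* ... platform_driver_register(&sh_rtc_driver) at module init ... */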
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
index 82de6895ade..499e07beebe 100644
--- a/arch/sh/kernel/timers/timer-cmt.c
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -31,7 +31,9 @@
#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
#define CMT_CMCSR_INIT 0x0040
#define CMT_CMCSR_CALIB 0x0000
-#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7206) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7263)
#define CMT_CMSTR 0xfffec000
#define CMT_CMCSR_0 0xfffec002
#define CMT_CMCNT_0 0xfffec004
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 628ec9a15e3..8935570008d 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -174,6 +174,7 @@ static int tmu_timer_init(void)
tmu_timer_stop();
#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
+ !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
!defined(CONFIG_CPU_SUBTYPE_SH7760) && \
!defined(CONFIG_CPU_SUBTYPE_SH7785) && \
!defined(CONFIG_CPU_SUBTYPE_SHX3)
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index cf99111cb33..a3bdc68ef02 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -1,947 +1,68 @@
-/*
- * 'traps.c' handles hardware traps and faults after we have saved some
- * state in 'entry.S'.
- *
- * SuperH version: Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2000 Philipp Rumpf
- * Copyright (C) 2000 David Howells
- * Copyright (C) 2002 - 2007 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/io.h>
#include <linux/bug.h>
-#include <linux/debug_locks.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <linux/kdebug.h>
-#include <linux/kexec.h>
-#include <linux/limits.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#ifdef CONFIG_SH_KGDB
-#include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs) \
-{ \
- if (kgdb_debug_hook && !user_mode(regs))\
- (*kgdb_debug_hook)(regs); \
-}
-#else
-#define CHK_REMOTE_DEBUG(regs)
-#endif
-
-#ifdef CONFIG_CPU_SH2
-# define TRAP_RESERVED_INST 4
-# define TRAP_ILLEGAL_SLOT_INST 6
-# define TRAP_ADDRESS_ERROR 9
-# ifdef CONFIG_CPU_SH2A
-# define TRAP_DIVZERO_ERROR 17
-# define TRAP_DIVOVF_ERROR 18
-# endif
-#else
-#define TRAP_RESERVED_INST 12
-#define TRAP_ILLEGAL_SLOT_INST 13
-#endif
-
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
-{
- unsigned long p;
- int i;
-
- printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
-
- for (p = bottom & ~31; p < top; ) {
- printk("%04lx: ", p & 0xffff);
-
- for (i = 0; i < 8; i++, p += 4) {
- unsigned int val;
-
- if (p < bottom || p >= top)
- printk(" ");
- else {
- if (__get_user(val, (unsigned int __user *)p)) {
- printk("\n");
- return;
- }
- printk("%08x ", val);
- }
- }
- printk("\n");
- }
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
- static int die_counter;
-
- oops_enter();
-
- console_verbose();
- spin_lock_irq(&die_lock);
- bust_spinlocks(1);
-
- printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-
- CHK_REMOTE_DEBUG(regs);
- print_modules();
- show_regs(regs);
-
- printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
- task_pid_nr(current), task_stack_page(current) + 1);
-
- if (!user_mode(regs) || in_interrupt())
- dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
- (unsigned long)task_stack_page(current));
-
- bust_spinlocks(0);
- add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
-
- if (kexec_should_crash(current))
- crash_kexec(regs);
-
- if (in_interrupt())
- panic("Fatal exception in interrupt");
-
- if (panic_on_oops)
- panic("Fatal exception");
-
- oops_exit();
- do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char *str, struct pt_regs *regs,
- long err)
-{
- if (!user_mode(regs))
- die(str, regs, err);
-}
-
-/*
- * try and fix up kernelspace address errors
- * - userspace errors just cause EFAULT to be returned, resulting in SEGV
- * - kernel/userspace interfaces cause a jump to an appropriate handler
- * - other kernel errors are bad
- * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
- */
-static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-{
- if (!user_mode(regs)) {
- const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return 0;
- }
- die(str, regs, err);
- }
- return -EFAULT;
-}
-
-/*
- * handle an instruction that does an unaligned memory access by emulating the
- * desired behaviour
- * - note that PC _may not_ point to the faulting instruction
- * (if that instruction is in a branch delay slot)
- * - return 0 if emulation okay, -EFAULT on existential error
- */
-static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
-{
- int ret, index, count;
- unsigned long *rm, *rn;
- unsigned char *src, *dst;
-
- index = (instruction>>8)&15; /* 0x0F00 */
- rn = &regs->regs[index];
-
- index = (instruction>>4)&15; /* 0x00F0 */
- rm = &regs->regs[index];
-
- count = 1<<(instruction&3);
-
- ret = -EFAULT;
- switch (instruction>>12) {
- case 0: /* mov.[bwl] to/from memory via r0+rn */
- if (instruction & 8) {
- /* from memory */
- src = (unsigned char*) *rm;
- src += regs->regs[0];
- dst = (unsigned char*) rn;
- *(unsigned long*)dst = 0;
-
-#ifdef __LITTLE_ENDIAN__
- if (copy_from_user(dst, src, count))
- goto fetch_fault;
-
- if ((count == 2) && dst[1] & 0x80) {
- dst[2] = 0xff;
- dst[3] = 0xff;
- }
-#else
- dst += 4-count;
-
- if (__copy_user(dst, src, count))
- goto fetch_fault;
-
- if ((count == 2) && dst[2] & 0x80) {
- dst[0] = 0xff;
- dst[1] = 0xff;
- }
-#endif
- } else {
- /* to memory */
- src = (unsigned char*) rm;
-#if !defined(__LITTLE_ENDIAN__)
- src += 4-count;
-#endif
- dst = (unsigned char*) *rn;
- dst += regs->regs[0];
-
- if (copy_to_user(dst, src, count))
- goto fetch_fault;
- }
- ret = 0;
- break;
-
- case 1: /* mov.l Rm,@(disp,Rn) */
- src = (unsigned char*) rm;
- dst = (unsigned char*) *rn;
- dst += (instruction&0x000F)<<2;
-
- if (copy_to_user(dst,src,4))
- goto fetch_fault;
- ret = 0;
- break;
-
- case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
- if (instruction & 4)
- *rn -= count;
- src = (unsigned char*) rm;
- dst = (unsigned char*) *rn;
-#if !defined(__LITTLE_ENDIAN__)
- src += 4-count;
-#endif
- if (copy_to_user(dst, src, count))
- goto fetch_fault;
- ret = 0;
- break;
-
- case 5: /* mov.l @(disp,Rm),Rn */
- src = (unsigned char*) *rm;
- src += (instruction&0x000F)<<2;
- dst = (unsigned char*) rn;
- *(unsigned long*)dst = 0;
-
- if (copy_from_user(dst,src,4))
- goto fetch_fault;
- ret = 0;
- break;
- case 6: /* mov.[bwl] from memory, possibly with post-increment */
- src = (unsigned char*) *rm;
- if (instruction & 4)
- *rm += count;
- dst = (unsigned char*) rn;
- *(unsigned long*)dst = 0;
-
-#ifdef __LITTLE_ENDIAN__
- if (copy_from_user(dst, src, count))
- goto fetch_fault;
-
- if ((count == 2) && dst[1] & 0x80) {
- dst[2] = 0xff;
- dst[3] = 0xff;
- }
-#else
- dst += 4-count;
-
- if (copy_from_user(dst, src, count))
- goto fetch_fault;
-
- if ((count == 2) && dst[2] & 0x80) {
- dst[0] = 0xff;
- dst[1] = 0xff;
- }
-#endif
- ret = 0;
- break;
-
- case 8:
- switch ((instruction&0xFF00)>>8) {
- case 0x81: /* mov.w R0,@(disp,Rn) */
- src = (unsigned char*) &regs->regs[0];
-#if !defined(__LITTLE_ENDIAN__)
- src += 2;
-#endif
- dst = (unsigned char*) *rm; /* called Rn in the spec */
- dst += (instruction&0x000F)<<1;
-
- if (copy_to_user(dst, src, 2))
- goto fetch_fault;
- ret = 0;
- break;
-
- case 0x85: /* mov.w @(disp,Rm),R0 */
- src = (unsigned char*) *rm;
- src += (instruction&0x000F)<<1;
- dst = (unsigned char*) &regs->regs[0];
- *(unsigned long*)dst = 0;
-
-#if !defined(__LITTLE_ENDIAN__)
- dst += 2;
-#endif
-
- if (copy_from_user(dst, src, 2))
- goto fetch_fault;
-
-#ifdef __LITTLE_ENDIAN__
- if (dst[1] & 0x80) {
- dst[2] = 0xff;
- dst[3] = 0xff;
- }
-#else
- if (dst[2] & 0x80) {
- dst[0] = 0xff;
- dst[1] = 0xff;
- }
-#endif
- ret = 0;
- break;
- }
- break;
- }
- return ret;
-
- fetch_fault:
- /* Argh. Address not only misaligned but also non-existent.
- * Raise an EFAULT and see if it's trapped
- */
- return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
-}
-
-/*
- * emulate the instruction in the delay slot
- * - fetches the instruction from PC+2
- */
-static inline int handle_unaligned_delayslot(struct pt_regs *regs)
+#ifdef CONFIG_BUG
+static void handle_BUG(struct pt_regs *regs)
{
- u16 instruction;
-
- if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
- /* the instruction-fetch faulted */
- if (user_mode(regs))
- return -EFAULT;
-
- /* kernel */
- die("delay-slot-insn faulting in handle_unaligned_delayslot",
- regs, 0);
+ enum bug_trap_type tt;
+ tt = report_bug(regs->pc, regs);
+ if (tt == BUG_TRAP_TYPE_WARN) {
+ regs->pc += instruction_size(regs->pc);
+ return;
}
- return handle_unaligned_ins(instruction,regs);
+ die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
}
-/*
- * handle an instruction that does an unaligned memory access
- * - have to be careful of branch delay-slot instructions that fault
- * SH3:
- * - if the branch would be taken PC points to the branch
- * - if the branch would not be taken, PC points to delay-slot
- * SH4:
- * - PC always points to delayed branch
- * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
- */
-
-/* Macros to determine offset from current PC for branch instructions */
-/* Explicit type coercion is used to force sign extension where needed */
-#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
-#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
-
-/*
- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
- * opcodes..
- */
-#ifndef CONFIG_CPU_SH2A
-static int handle_unaligned_notify_count = 10;
-
-static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
+int is_valid_bugaddr(unsigned long addr)
{
- u_int rm;
- int ret, index;
-
- index = (instruction>>8)&15; /* 0x0F00 */
- rm = regs->regs[index];
-
- /* shout about the first ten userspace fixups */
- if (user_mode(regs) && handle_unaligned_notify_count>0) {
- handle_unaligned_notify_count--;
-
- printk(KERN_NOTICE "Fixing up unaligned userspace access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (u16 *)regs->pc, instruction);
- }
-
- ret = -EFAULT;
- switch (instruction&0xF000) {
- case 0x0000:
- if (instruction==0x000B) {
- /* rts */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0)
- regs->pc = regs->pr;
- }
- else if ((instruction&0x00FF)==0x0023) {
- /* braf @Rm */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0)
- regs->pc += rm + 4;
- }
- else if ((instruction&0x00FF)==0x0003) {
- /* bsrf @Rm */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0) {
- regs->pr = regs->pc + 4;
- regs->pc += rm + 4;
- }
- }
- else {
- /* mov.[bwl] to/from memory via r0+rn */
- goto simple;
- }
- break;
-
- case 0x1000: /* mov.l Rm,@(disp,Rn) */
- goto simple;
-
- case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
- goto simple;
-
- case 0x4000:
- if ((instruction&0x00FF)==0x002B) {
- /* jmp @Rm */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0)
- regs->pc = rm;
- }
- else if ((instruction&0x00FF)==0x000B) {
- /* jsr @Rm */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0) {
- regs->pr = regs->pc + 4;
- regs->pc = rm;
- }
- }
- else {
- /* mov.[bwl] to/from memory via r0+rn */
- goto simple;
- }
- break;
-
- case 0x5000: /* mov.l @(disp,Rm),Rn */
- goto simple;
-
- case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
- goto simple;
-
- case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
- switch (instruction&0x0F00) {
- case 0x0100: /* mov.w R0,@(disp,Rm) */
- goto simple;
- case 0x0500: /* mov.w @(disp,Rm),R0 */
- goto simple;
- case 0x0B00: /* bf lab - no delayslot*/
- break;
- case 0x0F00: /* bf/s lab */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0) {
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
- if ((regs->sr & 0x00000001) != 0)
- regs->pc += 4; /* next after slot */
- else
-#endif
- regs->pc += SH_PC_8BIT_OFFSET(instruction);
- }
- break;
- case 0x0900: /* bt lab - no delayslot */
- break;
- case 0x0D00: /* bt/s lab */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0) {
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
- if ((regs->sr & 0x00000001) == 0)
- regs->pc += 4; /* next after slot */
- else
-#endif
- regs->pc += SH_PC_8BIT_OFFSET(instruction);
- }
- break;
- }
- break;
-
- case 0xA000: /* bra label */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0)
- regs->pc += SH_PC_12BIT_OFFSET(instruction);
- break;
-
- case 0xB000: /* bsr label */
- ret = handle_unaligned_delayslot(regs);
- if (ret==0) {
- regs->pr = regs->pc + 4;
- regs->pc += SH_PC_12BIT_OFFSET(instruction);
- }
- break;
- }
- return ret;
-
- /* handle non-delay-slot instruction */
- simple:
- ret = handle_unaligned_ins(instruction,regs);
- if (ret==0)
- regs->pc += instruction_size(instruction);
- return ret;
+ return addr >= PAGE_OFFSET;
}
-#endif /* CONFIG_CPU_SH2A */
-
-#ifdef CONFIG_CPU_HAS_SR_RB
-#define lookup_exception_vector(x) \
- __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
-#else
-#define lookup_exception_vector(x) \
- __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
#endif
/*
- * Handle various address error exceptions:
- * - instruction address error:
- * misaligned PC
- * PC >= 0x80000000 in user mode
- * - data address error (read and write)
- * misaligned data access
- * access to >= 0x80000000 is user mode
- * Unfortuntaly we can't distinguish between instruction address error
- * and data address errors caused by read accesses.
+ * Generic trap handler.
*/
-asmlinkage void do_address_error(struct pt_regs *regs,
- unsigned long writeaccess,
- unsigned long address)
+BUILD_TRAP_HANDLER(debug)
{
- unsigned long error_code = 0;
- mm_segment_t oldfs;
- siginfo_t info;
-#ifndef CONFIG_CPU_SH2A
- u16 instruction;
- int tmp;
-#endif
-
- /* Intentional ifdef */
-#ifdef CONFIG_CPU_HAS_SR_RB
- lookup_exception_vector(error_code);
-#endif
-
- oldfs = get_fs();
-
- if (user_mode(regs)) {
- int si_code = BUS_ADRERR;
-
- local_irq_enable();
+ TRAP_HANDLER_DECL;
- /* bad PC is not something we can fix */
- if (regs->pc & 1) {
- si_code = BUS_ADRALN;
- goto uspace_segv;
- }
+ /* Rewind */
+ regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
-#ifndef CONFIG_CPU_SH2A
- set_fs(USER_DS);
- if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
- /* Argh. Fault on the instruction itself.
- This should never happen non-SMP
- */
- set_fs(oldfs);
- goto uspace_segv;
- }
-
- tmp = handle_unaligned_access(instruction, regs);
- set_fs(oldfs);
-
- if (tmp==0)
- return; /* sorted */
-#endif
-
-uspace_segv:
- printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
- "access (PC %lx PR %lx)\n", current->comm, regs->pc,
- regs->pr);
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = si_code;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, current);
- } else {
- if (regs->pc & 1)
- die("unaligned program counter", regs, error_code);
-
-#ifndef CONFIG_CPU_SH2A
- set_fs(KERNEL_DS);
- if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
- /* Argh. Fault on the instruction itself.
- This should never happen non-SMP
- */
- set_fs(oldfs);
- die("insn faulting in do_address_error", regs, 0);
- }
-
- handle_unaligned_access(instruction, regs);
- set_fs(oldfs);
-#else
- printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
- "access\n", current->comm);
+ if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
- force_sig(SIGSEGV, current);
-#endif
- }
+ force_sig(SIGTRAP, current);
}
-#ifdef CONFIG_SH_DSP
/*
- * SH-DSP support gerg@snapgear.com.
+ * Special handler for BUG() traps.
*/
-int is_dsp_inst(struct pt_regs *regs)
+BUILD_TRAP_HANDLER(bug)
{
- unsigned short inst = 0;
-
- /*
- * Safe guard if DSP mode is already enabled or we're lacking
- * the DSP altogether.
- */
- if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
- return 0;
-
- get_user(inst, ((unsigned short *) regs->pc));
-
- inst &= 0xf000;
-
- /* Check for any type of DSP or support instruction */
- if ((inst == 0xf000) || (inst == 0x4000))
- return 1;
-
- return 0;
-}
-#else
-#define is_dsp_inst(regs) (0)
-#endif /* CONFIG_SH_DSP */
+ TRAP_HANDLER_DECL;
-#ifdef CONFIG_CPU_SH2A
-asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- siginfo_t info;
-
- switch (r4) {
- case TRAP_DIVZERO_ERROR:
- info.si_code = FPE_INTDIV;
- break;
- case TRAP_DIVOVF_ERROR:
- info.si_code = FPE_INTOVF;
- break;
- }
-
- force_sig_info(SIGFPE, &info, current);
-}
-#endif
-
-/* arch/sh/kernel/cpu/sh4/fpu.c */
-extern int do_fpu_inst(unsigned short, struct pt_regs *);
-extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7, struct pt_regs __regs);
-
-asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- unsigned long error_code;
- struct task_struct *tsk = current;
-
-#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst = 0;
- int err;
-
- get_user(inst, (unsigned short*)regs->pc);
-
- err = do_fpu_inst(inst, regs);
- if (!err) {
- regs->pc += instruction_size(inst);
- return;
- }
- /* not a FPU inst. */
-#endif
+ /* Rewind */
+ regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
-#ifdef CONFIG_SH_DSP
- /* Check if it's a DSP instruction */
- if (is_dsp_inst(regs)) {
- /* Enable DSP mode, and restart instruction. */
- regs->sr |= SR_DSP;
+ if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
+ SIGTRAP) == NOTIFY_STOP)
return;
- }
-#endif
-
- lookup_exception_vector(error_code);
-
- local_irq_enable();
- CHK_REMOTE_DEBUG(regs);
- force_sig(SIGILL, tsk);
- die_if_no_fixup("reserved instruction", regs, error_code);
-}
-
-#ifdef CONFIG_SH_FPU_EMU
-static int emulate_branch(unsigned short inst, struct pt_regs* regs)
-{
- /*
- * bfs: 8fxx: PC+=d*2+4;
- * bts: 8dxx: PC+=d*2+4;
- * bra: axxx: PC+=D*2+4;
- * bsr: bxxx: PC+=D*2+4 after PR=PC+4;
- * braf:0x23: PC+=Rn*2+4;
- * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
- * jmp: 4x2b: PC=Rn;
- * jsr: 4x0b: PC=Rn after PR=PC+4;
- * rts: 000b: PC=PR;
- */
- if ((inst & 0xfd00) == 0x8d00) {
- regs->pc += SH_PC_8BIT_OFFSET(inst);
- return 0;
- }
-
- if ((inst & 0xe000) == 0xa000) {
- regs->pc += SH_PC_12BIT_OFFSET(inst);
- return 0;
- }
-
- if ((inst & 0xf0df) == 0x0003) {
- regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
- return 0;
- }
-
- if ((inst & 0xf0df) == 0x400b) {
- regs->pc = regs->regs[(inst & 0x0f00) >> 8];
- return 0;
- }
-
- if ((inst & 0xffff) == 0x000b) {
- regs->pc = regs->pr;
- return 0;
- }
-
- return 1;
-}
-#endif
-
-asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- unsigned long error_code;
- struct task_struct *tsk = current;
-#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst = 0;
-
- get_user(inst, (unsigned short *)regs->pc + 1);
- if (!do_fpu_inst(inst, regs)) {
- get_user(inst, (unsigned short *)regs->pc);
- if (!emulate_branch(inst, regs))
- return;
- /* fault in branch.*/
- }
- /* not a FPU inst. */
-#endif
-
- lookup_exception_vector(error_code);
-
- local_irq_enable();
- CHK_REMOTE_DEBUG(regs);
- force_sig(SIGILL, tsk);
- die_if_no_fixup("illegal slot instruction", regs, error_code);
-}
-
-asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- long ex;
-
- lookup_exception_vector(ex);
- die_if_kernel("exception", regs, ex);
-}
-
-#if defined(CONFIG_SH_STANDARD_BIOS)
-void *gdb_vbr_vector;
-
-static inline void __init gdb_vbr_init(void)
-{
- register unsigned long vbr;
-
- /*
- * Read the old value of the VBR register to initialise
- * the vector through which debug and BIOS traps are
- * delegated by the Linux trap handler.
- */
- asm volatile("stc vbr, %0" : "=r" (vbr));
-
- gdb_vbr_vector = (void *)(vbr + 0x100);
- printk("Setting GDB trap vector to 0x%08lx\n",
- (unsigned long)gdb_vbr_vector);
-}
-#endif
-
-void __cpuinit per_cpu_trap_init(void)
-{
- extern void *vbr_base;
-
-#ifdef CONFIG_SH_STANDARD_BIOS
- if (raw_smp_processor_id() == 0)
- gdb_vbr_init();
-#endif
-
- /* NOTE: The VBR value should be at P1
- (or P2, virtural "fixed" address space).
- It's definitely should not in physical address. */
-
- asm volatile("ldc %0, vbr"
- : /* no output */
- : "r" (&vbr_base)
- : "memory");
-}
-
-void *set_exception_table_vec(unsigned int vec, void *handler)
-{
- extern void *exception_handling_table[];
- void *old_handler;
-
- old_handler = exception_handling_table[vec];
- exception_handling_table[vec] = handler;
- return old_handler;
-}
-
-extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs);
-
-void __init trap_init(void)
-{
- set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
- set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
-
-#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
- defined(CONFIG_SH_FPU_EMU)
- /*
- * For SH-4 lacking an FPU, treat floating point instructions as
- * reserved. They'll be handled in the math-emu case, or faulted on
- * otherwise.
- */
- set_exception_table_evt(0x800, do_reserved_inst);
- set_exception_table_evt(0x820, do_illegal_slot_inst);
-#elif defined(CONFIG_SH_FPU)
-#ifdef CONFIG_CPU_SUBTYPE_SHX3
- set_exception_table_evt(0xd80, do_fpu_state_restore);
- set_exception_table_evt(0xda0, do_fpu_state_restore);
-#else
- set_exception_table_evt(0x800, do_fpu_state_restore);
- set_exception_table_evt(0x820, do_fpu_state_restore);
-#endif
-#endif
-
-#ifdef CONFIG_CPU_SH2
- set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
-#endif
-#ifdef CONFIG_CPU_SH2A
- set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
- set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
-#endif
-
- /* Setup VBR for boot cpu */
- per_cpu_trap_init();
-}
#ifdef CONFIG_BUG
-void handle_BUG(struct pt_regs *regs)
-{
- enum bug_trap_type tt;
- tt = report_bug(regs->pc, regs);
- if (tt == BUG_TRAP_TYPE_WARN) {
- regs->pc += 2;
- return;
+ if (__kernel_text_address(instruction_pointer(regs))) {
+ opcode_t insn = *(opcode_t *)instruction_pointer(regs);
+ if (insn == TRAPA_BUG_OPCODE)
+ handle_BUG(regs);
}
-
- die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
-}
-
-int is_valid_bugaddr(unsigned long addr)
-{
- return addr >= PAGE_OFFSET;
-}
-#endif
-
-void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs)
-{
- unsigned long addr;
-
- if (regs && user_mode(regs))
- return;
-
- printk("\nCall trace: ");
-#ifdef CONFIG_KALLSYMS
- printk("\n");
#endif
- while (!kstack_end(sp)) {
- addr = *sp++;
- if (kernel_text_address(addr))
- print_ip_sym(addr);
- }
-
- printk("\n");
-
- if (!tsk)
- tsk = current;
-
- debug_show_held_locks(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
- unsigned long stack;
-
- if (!tsk)
- tsk = current;
- if (tsk == current)
- sp = (unsigned long *)current_stack_pointer;
- else
- sp = (unsigned long *)tsk->thread.sp;
-
- stack = (unsigned long)sp;
- dump_mem("Stack: ", stack, THREAD_SIZE +
- (unsigned long)task_stack_page(tsk));
- show_trace(tsk, sp, NULL);
-}
-
-void dump_stack(void)
-{
- show_stack(NULL, NULL);
+ force_sig(SIGTRAP, current);
}
-EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
new file mode 100644
index 00000000000..2e58f7a6b74
--- /dev/null
+++ b/arch/sh/kernel/traps_32.c
@@ -0,0 +1,919 @@
+/*
+ * 'traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf
+ * Copyright (C) 2000 David Howells
+ * Copyright (C) 2002 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/debug_locks.h>
+#include <linux/kdebug.h>
+#include <linux/kexec.h>
+#include <linux/limits.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_SH_KGDB
+#include <asm/kgdb.h>
+#define CHK_REMOTE_DEBUG(regs) \
+{ \
+ if (kgdb_debug_hook && !user_mode(regs))\
+ (*kgdb_debug_hook)(regs); \
+}
+#else
+#define CHK_REMOTE_DEBUG(regs)
+#endif
+
+#ifdef CONFIG_CPU_SH2
+# define TRAP_RESERVED_INST 4
+# define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_ADDRESS_ERROR 9
+# ifdef CONFIG_CPU_SH2A
+# define TRAP_DIVZERO_ERROR 17
+# define TRAP_DIVOVF_ERROR 18
+# endif
+#else
+#define TRAP_RESERVED_INST 12
+#define TRAP_ILLEGAL_SLOT_INST 13
+#endif
+
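+/*
+ * dump_mem() prints eight words per line, keyed by the low 16 bits of
+ * the address, e.g. "1fc0: 00000001 00000002 ...", with out-of-range
+ * slots left blank.
+ */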
+static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+{
+ unsigned long p;
+ int i;
+
+ printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+
+ for (p = bottom & ~31; p < top; ) {
+ printk("%04lx: ", p & 0xffff);
+
+ for (i = 0; i < 8; i++, p += 4) {
+ unsigned int val;
+
+ if (p < bottom || p >= top)
+ printk(" ");
+ else {
+ if (__get_user(val, (unsigned int __user *)p)) {
+ printk("\n");
+ return;
+ }
+ printk("%08x ", val);
+ }
+ }
+ printk("\n");
+ }
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ static int die_counter;
+
+ oops_enter();
+
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+
+ CHK_REMOTE_DEBUG(regs);
+ print_modules();
+ show_regs(regs);
+
+ printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+ task_pid_nr(current), task_stack_page(current) + 1);
+
+ if (!user_mode(regs) || in_interrupt())
+ dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
+ (unsigned long)task_stack_page(current));
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE);
+ spin_unlock_irq(&die_lock);
+
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ oops_exit();
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char *str, struct pt_regs *regs,
+ long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+/*
+ * try and fix up kernelspace address errors
+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+ * - kernel/userspace interfaces cause a jump to an appropriate handler
+ * - other kernel errors are bad
+ * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
+ */
+static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs)) {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return 0;
+ }
+ die(str, regs, err);
+ }
+ return -EFAULT;
+}
+
+/*
+ * handle an instruction that does an unaligned memory access by emulating the
+ * desired behaviour
+ * - note that PC _may not_ point to the faulting instruction
+ * (if that instruction is in a branch delay slot)
+ * - return 0 if emulation okay, -EFAULT if the target address does not exist
+ */
+static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
+{
+ int ret, index, count;
+ unsigned long *rm, *rn;
+ unsigned char *src, *dst;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rn = &regs->regs[index];
+
+ index = (instruction>>4)&15; /* 0x00F0 */
+ rm = &regs->regs[index];
+
+ count = 1<<(instruction&3);
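+	/*
+	 * For the mov.[bwl] forms handled below, the low two bits of the
+	 * opcode select the access size: instruction&3 == 0 -> mov.b
+	 * (count = 1), == 1 -> mov.w (count = 2), == 2 -> mov.l (count = 4).
+	 */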
+
+ ret = -EFAULT;
+ switch (instruction>>12) {
+ case 0: /* mov.[bwl] to/from memory via r0+rn */
+ if (instruction & 8) {
+ /* from memory */
+ src = (unsigned char*) *rm;
+ src += regs->regs[0];
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ dst += 4-count;
+
+ if (__copy_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ } else {
+ /* to memory */
+ src = (unsigned char*) rm;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ dst = (unsigned char*) *rn;
+ dst += regs->regs[0];
+
+ if (copy_to_user(dst, src, count))
+ goto fetch_fault;
+ }
+ ret = 0;
+ break;
+
+ case 1: /* mov.l Rm,@(disp,Rn) */
+ src = (unsigned char*) rm;
+ dst = (unsigned char*) *rn;
+ dst += (instruction&0x000F)<<2;
+
+ if (copy_to_user(dst,src,4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
+ if (instruction & 4)
+ *rn -= count;
+ src = (unsigned char*) rm;
+ dst = (unsigned char*) *rn;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ if (copy_to_user(dst, src, count))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 5: /* mov.l @(disp,Rm),Rn */
+ src = (unsigned char*) *rm;
+ src += (instruction&0x000F)<<2;
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+ if (copy_from_user(dst,src,4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 6: /* mov.[bwl] from memory, possibly with post-increment */
+ src = (unsigned char*) *rm;
+ if (instruction & 4)
+ *rm += count;
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ dst += 4-count;
+
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ ret = 0;
+ break;
+
+ case 8:
+ switch ((instruction&0xFF00)>>8) {
+ case 0x81: /* mov.w R0,@(disp,Rn) */
+ src = (unsigned char*) &regs->regs[0];
+#if !defined(__LITTLE_ENDIAN__)
+ src += 2;
+#endif
+ dst = (unsigned char*) *rm; /* called Rn in the spec */
+ dst += (instruction&0x000F)<<1;
+
+ if (copy_to_user(dst, src, 2))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 0x85: /* mov.w @(disp,Rm),R0 */
+ src = (unsigned char*) *rm;
+ src += (instruction&0x000F)<<1;
+ dst = (unsigned char*) &regs->regs[0];
+ *(unsigned long*)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 2;
+#endif
+
+ if (copy_from_user(dst, src, 2))
+ goto fetch_fault;
+
+#ifdef __LITTLE_ENDIAN__
+ if (dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ if (dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ ret = 0;
+ break;
+ }
+ break;
+ }
+ return ret;
+
+ fetch_fault:
+ /* Argh. Address not only misaligned but also non-existent.
+ * Raise an EFAULT and see if it's trapped
+ */
+ return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+}
+
+/*
+ * emulate the instruction in the delay slot
+ * - fetches the instruction from PC+2
+ */
+static inline int handle_unaligned_delayslot(struct pt_regs *regs)
+{
+ u16 instruction;
+
+ if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
+ /* the instruction-fetch faulted */
+ if (user_mode(regs))
+ return -EFAULT;
+
+ /* kernel */
+ die("delay-slot-insn faulting in handle_unaligned_delayslot",
+ regs, 0);
+ }
+
+ return handle_unaligned_ins(instruction,regs);
+}
+
+/*
+ * handle an instruction that does an unaligned memory access
+ * - have to be careful of branch delay-slot instructions that fault
+ * SH3:
+ * - if the branch would be taken PC points to the branch
+ * - if the branch would not be taken, PC points to delay-slot
+ * SH4:
+ * - PC always points to delayed branch
+ * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
+ */
+
+/* Macros to determine offset from current PC for branch instructions */
+/* Explicit type coercion is used to force sign extension where needed */
+#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
+#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
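+
+/*
+ * Worked example: for "bra" with instr == 0xaffe the 12-bit displacement
+ * is 0xffe (-2): (0xaffe << 4) truncates to the signed short 0xffe0
+ * (-32), and (-32 >> 3) + 4 == 0 == -2*2 + 4.  For the 8-bit form, a
+ * bf/s opcode 0x8f04 gives (signed char)0x04 * 2 + 4 == 12.
+ */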
+
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
+static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
+{
+ u_int rm;
+ int ret, index;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rm = regs->regs[index];
+
+ /* shout about the first ten userspace fixups */
+ if (user_mode(regs) && handle_unaligned_notify_count>0) {
+ handle_unaligned_notify_count--;
+
+ printk(KERN_NOTICE "Fixing up unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, task_pid_nr(current),
+ (u16 *)regs->pc, instruction);
+ }
+
+ ret = -EFAULT;
+ switch (instruction&0xF000) {
+ case 0x0000:
+ if (instruction==0x000B) {
+ /* rts */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc = regs->pr;
+ }
+ else if ((instruction&0x00FF)==0x0023) {
+ /* braf @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += rm + 4;
+ }
+ else if ((instruction&0x00FF)==0x0003) {
+ /* bsrf @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += rm + 4;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x1000: /* mov.l Rm,@(disp,Rn) */
+ goto simple;
+
+ case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
+ goto simple;
+
+ case 0x4000:
+ if ((instruction&0x00FF)==0x002B) {
+ /* jmp @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc = rm;
+ }
+ else if ((instruction&0x00FF)==0x000B) {
+ /* jsr @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc = rm;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x5000: /* mov.l @(disp,Rm),Rn */
+ goto simple;
+
+ case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
+ goto simple;
+
+ case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
+ switch (instruction&0x0F00) {
+ case 0x0100: /* mov.w R0,@(disp,Rm) */
+ goto simple;
+ case 0x0500: /* mov.w @(disp,Rm),R0 */
+ goto simple;
+ case 0x0B00: /* bf lab - no delayslot*/
+ break;
+ case 0x0F00: /* bf/s lab */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+ if ((regs->sr & 0x00000001) != 0)
+ regs->pc += 4; /* next after slot */
+ else
+#endif
+ regs->pc += SH_PC_8BIT_OFFSET(instruction);
+ }
+ break;
+ case 0x0900: /* bt lab - no delayslot */
+ break;
+ case 0x0D00: /* bt/s lab */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+ if ((regs->sr & 0x00000001) == 0)
+ regs->pc += 4; /* next after slot */
+ else
+#endif
+ regs->pc += SH_PC_8BIT_OFFSET(instruction);
+ }
+ break;
+ }
+ break;
+
+ case 0xA000: /* bra label */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += SH_PC_12BIT_OFFSET(instruction);
+ break;
+
+ case 0xB000: /* bsr label */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += SH_PC_12BIT_OFFSET(instruction);
+ }
+ break;
+ }
+ return ret;
+
+ /* handle non-delay-slot instruction */
+ simple:
+ ret = handle_unaligned_ins(instruction,regs);
+ if (ret==0)
+ regs->pc += instruction_size(instruction);
+ return ret;
+}
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
+
+/*
+ * Handle various address error exceptions:
+ * - instruction address error:
+ * misaligned PC
+ * PC >= 0x80000000 in user mode
+ * - data address error (read and write)
+ * misaligned data access
+ *   access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address error
+ * and data address errors caused by read accesses.
+ */
+asmlinkage void do_address_error(struct pt_regs *regs,
+ unsigned long writeaccess,
+ unsigned long address)
+{
+ unsigned long error_code = 0;
+ mm_segment_t oldfs;
+ siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
+ u16 instruction;
+ int tmp;
+#endif
+
+ /* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+ lookup_exception_vector(error_code);
+#endif
+
+ oldfs = get_fs();
+
+ if (user_mode(regs)) {
+ int si_code = BUS_ADRERR;
+
+ local_irq_enable();
+
+ /* bad PC is not something we can fix */
+ if (regs->pc & 1) {
+ si_code = BUS_ADRALN;
+ goto uspace_segv;
+ }
+
+#ifndef CONFIG_CPU_SH2A
+ set_fs(USER_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ /* Argh. Fault on the instruction itself.
+		   This should never happen on non-SMP
+ */
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+
+ tmp = handle_unaligned_access(instruction, regs);
+ set_fs(oldfs);
+
+ if (tmp==0)
+ return; /* sorted */
+#endif
+
+uspace_segv:
+ printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+ "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+ regs->pr);
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, current);
+ } else {
+ if (regs->pc & 1)
+ die("unaligned program counter", regs, error_code);
+
+#ifndef CONFIG_CPU_SH2A
+ set_fs(KERNEL_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ /* Argh. Fault on the instruction itself.
+		   This should never happen on non-SMP
+ */
+ set_fs(oldfs);
+ die("insn faulting in do_address_error", regs, 0);
+ }
+
+ handle_unaligned_access(instruction, regs);
+ set_fs(oldfs);
+#else
+ printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+ "access\n", current->comm);
+
+ force_sig(SIGSEGV, current);
+#endif
+ }
+}
+
+#ifdef CONFIG_SH_DSP
+/*
+ * SH-DSP support gerg@snapgear.com.
+ */
+int is_dsp_inst(struct pt_regs *regs)
+{
+ unsigned short inst = 0;
+
+ /*
+	 * Safeguard if DSP mode is already enabled or we're lacking
+ * the DSP altogether.
+ */
+ if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
+ return 0;
+
+ get_user(inst, ((unsigned short *) regs->pc));
+
+ inst &= 0xf000;
+
+ /* Check for any type of DSP or support instruction */
+ if ((inst == 0xf000) || (inst == 0x4000))
+ return 1;
+
+ return 0;
+}
+#else
+#define is_dsp_inst(regs) (0)
+#endif /* CONFIG_SH_DSP */
+
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ siginfo_t info;
+
+ switch (r4) {
+ case TRAP_DIVZERO_ERROR:
+ info.si_code = FPE_INTDIV;
+ break;
+ case TRAP_DIVOVF_ERROR:
+ info.si_code = FPE_INTOVF;
+ break;
+ }
+
+ force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
+asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ unsigned long error_code;
+ struct task_struct *tsk = current;
+
+#ifdef CONFIG_SH_FPU_EMU
+ unsigned short inst = 0;
+ int err;
+
+ get_user(inst, (unsigned short*)regs->pc);
+
+ err = do_fpu_inst(inst, regs);
+ if (!err) {
+ regs->pc += instruction_size(inst);
+ return;
+ }
+ /* not a FPU inst. */
+#endif
+
+#ifdef CONFIG_SH_DSP
+ /* Check if it's a DSP instruction */
+ if (is_dsp_inst(regs)) {
+ /* Enable DSP mode, and restart instruction. */
+ regs->sr |= SR_DSP;
+ return;
+ }
+#endif
+
+ lookup_exception_vector(error_code);
+
+ local_irq_enable();
+ CHK_REMOTE_DEBUG(regs);
+ force_sig(SIGILL, tsk);
+ die_if_no_fixup("reserved instruction", regs, error_code);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+static int emulate_branch(unsigned short inst, struct pt_regs* regs)
+{
+ /*
+ * bfs: 8fxx: PC+=d*2+4;
+ * bts: 8dxx: PC+=d*2+4;
+ * bra: axxx: PC+=D*2+4;
+ * bsr: bxxx: PC+=D*2+4 after PR=PC+4;
+ * braf:0x23: PC+=Rn*2+4;
+ * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
+ * jmp: 4x2b: PC=Rn;
+ * jsr: 4x0b: PC=Rn after PR=PC+4;
+ * rts: 000b: PC=PR;
+ */
+ if ((inst & 0xfd00) == 0x8d00) {
+ regs->pc += SH_PC_8BIT_OFFSET(inst);
+ return 0;
+ }
+
+ if ((inst & 0xe000) == 0xa000) {
+ regs->pc += SH_PC_12BIT_OFFSET(inst);
+ return 0;
+ }
+
+ if ((inst & 0xf0df) == 0x0003) {
+ regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
+ return 0;
+ }
+
+ if ((inst & 0xf0df) == 0x400b) {
+ regs->pc = regs->regs[(inst & 0x0f00) >> 8];
+ return 0;
+ }
+
+ if ((inst & 0xffff) == 0x000b) {
+ regs->pc = regs->pr;
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ unsigned long error_code;
+ struct task_struct *tsk = current;
+#ifdef CONFIG_SH_FPU_EMU
+ unsigned short inst = 0;
+
+ get_user(inst, (unsigned short *)regs->pc + 1);
+ if (!do_fpu_inst(inst, regs)) {
+ get_user(inst, (unsigned short *)regs->pc);
+ if (!emulate_branch(inst, regs))
+ return;
+ /* fault in branch.*/
+ }
+ /* not a FPU inst. */
+#endif
+
+ lookup_exception_vector(error_code);
+
+ local_irq_enable();
+ CHK_REMOTE_DEBUG(regs);
+ force_sig(SIGILL, tsk);
+ die_if_no_fixup("illegal slot instruction", regs, error_code);
+}
+
+asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ long ex;
+
+ lookup_exception_vector(ex);
+ die_if_kernel("exception", regs, ex);
+}
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+void *gdb_vbr_vector;
+
+static inline void __init gdb_vbr_init(void)
+{
+ register unsigned long vbr;
+
+ /*
+ * Read the old value of the VBR register to initialise
+ * the vector through which debug and BIOS traps are
+ * delegated by the Linux trap handler.
+ */
+ asm volatile("stc vbr, %0" : "=r" (vbr));
+
+ gdb_vbr_vector = (void *)(vbr + 0x100);
+ printk("Setting GDB trap vector to 0x%08lx\n",
+ (unsigned long)gdb_vbr_vector);
+}
+#endif
+
+void __cpuinit per_cpu_trap_init(void)
+{
+ extern void *vbr_base;
+
+#ifdef CONFIG_SH_STANDARD_BIOS
+ if (raw_smp_processor_id() == 0)
+ gdb_vbr_init();
+#endif
+
+	/* NOTE: The VBR value should be in P1
+	   (or P2, the virtual "fixed" address space).
+	   It should definitely not be a physical address. */
+
+ asm volatile("ldc %0, vbr"
+ : /* no output */
+ : "r" (&vbr_base)
+ : "memory");
+}
+
+void *set_exception_table_vec(unsigned int vec, void *handler)
+{
+ extern void *exception_handling_table[];
+ void *old_handler;
+
+ old_handler = exception_handling_table[vec];
+ exception_handling_table[vec] = handler;
+ return old_handler;
+}
+
+void __init trap_init(void)
+{
+ set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
+ set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
+
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
+ defined(CONFIG_SH_FPU_EMU)
+ /*
+ * For SH-4 lacking an FPU, treat floating point instructions as
+ * reserved. They'll be handled in the math-emu case, or faulted on
+ * otherwise.
+ */
+ set_exception_table_evt(0x800, do_reserved_inst);
+ set_exception_table_evt(0x820, do_illegal_slot_inst);
+#elif defined(CONFIG_SH_FPU)
+#ifdef CONFIG_CPU_SUBTYPE_SHX3
+ set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
+ set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
+#else
+ set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
+ set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
+#endif
+#endif
+
+#ifdef CONFIG_CPU_SH2
+ set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+ set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+ set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
+ /* Setup VBR for boot cpu */
+ per_cpu_trap_init();
+}
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+
+ if (regs && user_mode(regs))
+ return;
+
+ printk("\nCall trace: ");
+#ifdef CONFIG_KALLSYMS
+ printk("\n");
+#endif
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (kernel_text_address(addr))
+ print_ip_sym(addr);
+ }
+
+ printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ unsigned long stack;
+
+ if (!tsk)
+ tsk = current;
+ if (tsk == current)
+ sp = (unsigned long *)current_stack_pointer;
+ else
+ sp = (unsigned long *)tsk->thread.sp;
+
+ stack = (unsigned long)sp;
+ dump_mem("Stack: ", stack, THREAD_SIZE +
+ (unsigned long)task_stack_page(tsk));
+ show_trace(tsk, sp, NULL);
+}
+
+void dump_stack(void)
+{
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
new file mode 100644
index 00000000000..c0b3c6f6edb
--- /dev/null
+++ b/arch/sh/kernel/traps_64.c
@@ -0,0 +1,975 @@
+/*
+ * arch/sh/kernel/traps_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+
+#undef DEBUG_EXCEPTION
+#ifdef DEBUG_EXCEPTION
+/* implemented in ../lib/dbg.c */
+extern void show_excp_regs(char *fname, int trapnr, int signr,
+ struct pt_regs *regs);
+#else
+#define show_excp_regs(a, b, c, d)
+#endif
+
+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+ unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
+
+#define DO_ERROR(trapnr, signr, str, name, tsk) \
+asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
+{ \
+ do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ printk("%s: %lx\n", str, (err & 0xffffff));
+ show_regs(regs);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs)) {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return;
+ }
+ die(str, regs, err);
+ }
+}
+
+DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
+DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
+
+
+/* Implement misaligned load/store handling for kernel (and optionally for user
+   mode too). Limitation: only SHmedia mode code is handled - there is no
+   handling at all for misaligned accesses occurring in SHcompact code yet. */
+
+static int misaligned_fixup(struct pt_regs *regs);
+
+asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0) {
+ do_unhandled_exception(7, SIGSEGV, "address error(load)",
+ "do_address_error_load",
+ error_code, regs, current);
+ }
+ return;
+}
+
+asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0) {
+ do_unhandled_exception(8, SIGSEGV, "address error(store)",
+ "do_address_error_store",
+ error_code, regs, current);
+ }
+ return;
+}
+
+#if defined(CONFIG_SH64_ID2815_WORKAROUND)
+
+#define OPCODE_INVALID 0
+#define OPCODE_USER_VALID 1
+#define OPCODE_PRIV_VALID 2
+
+/* getcon/putcon - requires checking which control register is referenced. */
+#define OPCODE_CTRL_REG 3
+
+/* Table of valid opcodes for SHmedia mode.
+ Form a 10-bit value by concatenating the major/minor opcodes i.e.
+ opcode[31:26,20:16]. The 6 MSBs of this value index into the following
+ array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
+ LSBs==4'b0000 etc). */
+static unsigned long shmedia_opcode_table[64] = {
+ 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
+ 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
+ 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
+};
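+
+/*
+ * Example lookup: for an opcode whose major field (bits 31:26) is 0x21
+ * and whose minor field (bits 20:16) is 0x3, the validity bit-pair is
+ * (shmedia_opcode_table[0x21] >> (0x3 << 1)) & 0x3.
+ */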
+
+void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
+{
+ /* Workaround SH5-101 cut2 silicon defect #2815 :
+ in some situations, inter-mode branches from SHcompact -> SHmedia
+ which should take ITLBMISS or EXECPROT exceptions at the target
+ falsely take RESINST at the target instead. */
+
+ unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
+ unsigned long pc, aligned_pc;
+ int get_user_error;
+ int trapnr = 12;
+ int signr = SIGILL;
+ char *exception_name = "reserved_instruction";
+
+ pc = regs->pc;
+ if ((pc & 3) == 1) {
+ /* SHmedia : check for defect. This requires executable vmas
+ to be readable too. */
+ aligned_pc = pc & ~3;
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+ get_user_error = -EFAULT;
+ } else {
+ get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+ }
+ if (get_user_error >= 0) {
+ unsigned long index, shift;
+ unsigned long major, minor, combined;
+ unsigned long reserved_field;
+ reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
+ major = (opcode >> 26) & 0x3f;
+ minor = (opcode >> 16) & 0xf;
+ combined = (major << 4) | minor;
+ index = major;
+ shift = minor << 1;
+ if (reserved_field == 0) {
+ int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
+ switch (opcode_state) {
+ case OPCODE_INVALID:
+ /* Trap. */
+ break;
+ case OPCODE_USER_VALID:
+ /* Restart the instruction : the branch to the instruction will now be from an RTE
+ not from SHcompact so the silicon defect won't be triggered. */
+ return;
+ case OPCODE_PRIV_VALID:
+ if (!user_mode(regs)) {
+ /* Should only ever get here if a module has
+ SHcompact code inside it. If so, the same fix up is needed. */
+ return; /* same reason */
+ }
+ /* Otherwise, user mode trying to execute a privileged instruction -
+ fall through to trap. */
+ break;
+ case OPCODE_CTRL_REG:
+ /* If in privileged mode, return as above. */
+ if (!user_mode(regs)) return;
+ /* In user mode ... */
+ if (combined == 0x9f) { /* GETCON */
+ unsigned long regno = (opcode >> 20) & 0x3f;
+ if (regno >= 62) {
+ return;
+ }
+ /* Otherwise, reserved or privileged control register, => trap */
+ } else if (combined == 0x1bf) { /* PUTCON */
+ unsigned long regno = (opcode >> 4) & 0x3f;
+ if (regno >= 62) {
+ return;
+ }
+ /* Otherwise, reserved or privileged control register, => trap */
+ } else {
+ /* Trap */
+ }
+ break;
+ default:
+ /* Fall through to trap. */
+ break;
+ }
+ }
+ /* fall through to normal resinst processing */
+ } else {
+ /* Error trying to read opcode. This typically means a
+ real fault, not a RESINST any more. So change the
+ codes. */
+ trapnr = 87;
+ exception_name = "address error (exec)";
+ signr = SIGSEGV;
+ }
+ }
+
+ do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
+}
+
+#else /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* If the workaround isn't needed, this is just a straightforward reserved
+ instruction */
+DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
+
+#endif /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* Called with interrupts disabled */
+asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
+{
+ show_excp_regs(__FUNCTION__, -1, -1, regs);
+ die_if_kernel("exception", regs, ex);
+}
+
+int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
+{
+ /* Syscall debug */
+ printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
+
+ die_if_kernel("unknown trapa", regs, scId);
+
+ return -ENOSYS;
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+#ifdef CONFIG_KALLSYMS
+ extern void sh64_unwind(struct pt_regs *regs);
+ struct pt_regs *regs;
+
+ regs = tsk ? tsk->thread.kregs : NULL;
+
+ sh64_unwind(regs);
+#else
+ printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
+#endif
+}
+
+void show_task(unsigned long *sp)
+{
+ show_stack(NULL, sp);
+}
+
+void dump_stack(void)
+{
+ show_task(NULL);
+}
+/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
+EXPORT_SYMBOL(dump_stack);
+
+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+ unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
+{
+ show_excp_regs(fn_name, trapnr, signr, regs);
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+
+ if (user_mode(regs))
+ force_sig(signr, tsk);
+
+ die_if_no_fixup(str, regs, error_code);
+}
+
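+/*
+ * The ISA of the faulting code is encoded in the low bits of the PC:
+ * (pc & 3) == 1 marks SHmedia, an even PC marks SHcompact, and any
+ * other combination is a misaligned PC.
+ */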
+static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
+{
+ int get_user_error;
+ unsigned long aligned_pc;
+ unsigned long opcode;
+
+ if ((pc & 3) == 1) {
+ /* SHmedia */
+ aligned_pc = pc & ~3;
+ if (from_user_mode) {
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+ get_user_error = -EFAULT;
+ } else {
+ get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+ *result_opcode = opcode;
+ }
+ return get_user_error;
+ } else {
+ /* If the fault was in the kernel, we can either read
+ * this directly, or if not, we fault.
+ */
+ *result_opcode = *(unsigned long *) aligned_pc;
+ return 0;
+ }
+ } else if ((pc & 1) == 0) {
+ /* SHcompact */
+ /* TODO : provide handling for this. We don't really support
+ user-mode SHcompact yet, and for a kernel fault, this would
+ have to come from a module built for SHcompact. */
+ return -EFAULT;
+ } else {
+ /* misaligned */
+ return -EFAULT;
+ }
+}
+
+static int address_is_sign_extended(__u64 a)
+{
+ __u64 b;
+#if (NEFF == 32)
+ b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
+ return (b == a) ? 1 : 0;
+#else
+#error "Sign extend check only works for NEFF==32"
+#endif
+}
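+
+/*
+ * e.g. with NEFF == 32, 0xffffffff80000000 is correctly sign-extended
+ * (bit 31 set, upper half all ones) while 0x0000000080000000 is not,
+ * since re-extending its low 32 bits would give a different value.
+ */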
+
+static int generate_and_check_address(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ __u64 *address)
+{
+ /* return -1 for fault, 0 for OK */
+
+ __u64 base_address, addr;
+ int basereg;
+
+ basereg = (opcode >> 20) & 0x3f;
+ base_address = regs->regs[basereg];
+ if (displacement_not_indexed) {
+ __s64 displacement;
+ displacement = (opcode >> 10) & 0x3ff;
+ displacement = ((displacement << 54) >> 54); /* sign extend */
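+		/*
+		 * e.g. a raw 10-bit field of 0x3ff becomes -1 after the
+		 * shift pair, while 0x1ff stays 511; the displacement is
+		 * then scaled by the access width below.
+		 */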
+ addr = (__u64)((__s64)base_address + (displacement << width_shift));
+ } else {
+ __u64 offset;
+ int offsetreg;
+ offsetreg = (opcode >> 10) & 0x3f;
+ offset = regs->regs[offsetreg];
+ addr = base_address + offset;
+ }
+
+ /* Check sign extended */
+ if (!address_is_sign_extended(addr)) {
+ return -1;
+ }
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+ /* Check accessible. For misaligned access in the kernel, assume the
+ address is always accessible (and if not, just fault when the
+ load/store gets done.) */
+ if (user_mode(regs)) {
+ if (addr >= TASK_SIZE) {
+ return -1;
+ }
+ /* Do access_ok check later - it depends on whether it's a load or a store. */
+ }
+#endif
+
+ *address = addr;
+ return 0;
+}
+
+/* Default value as for sh */
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+static int user_mode_unaligned_fixup_count = 10;
+static int user_mode_unaligned_fixup_enable = 1;
+#endif
+
+static int kernel_mode_unaligned_fixup_count = 32;
+
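+/*
+ * Byte-sized accesses can never themselves be misaligned, so the
+ * byte-at-a-time copies below let the kernel service its own unaligned
+ * word accesses without taking a further alignment trap.
+ */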
+static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
+{
+ unsigned short x;
+ unsigned char *p, *q;
+ p = (unsigned char *) (int) address;
+ q = (unsigned char *) &x;
+ q[0] = p[0];
+ q[1] = p[1];
+
+ if (do_sign_extend) {
+ *result = (__u64)(__s64) *(short *) &x;
+ } else {
+ *result = (__u64) x;
+ }
+}
+
+static void misaligned_kernel_word_store(__u64 address, __u64 value)
+{
+ unsigned short x;
+ unsigned char *p, *q;
+ p = (unsigned char *) (int) address;
+ q = (unsigned char *) &x;
+
+ x = (__u16) value;
+ p[0] = q[0];
+ p[1] = q[1];
+}
+
+static int misaligned_load(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ int do_sign_extend)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int destreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ destreg = (opcode >> 4) & 0x3f;
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+ if (user_mode(regs)) {
+ __u64 buffer;
+
+ if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ switch (width_shift) {
+ case 1:
+ if (do_sign_extend) {
+ regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
+ } else {
+ regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
+ }
+ break;
+ case 2:
+ regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
+ break;
+ case 3:
+ regs->regs[destreg] = buffer;
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ } else
+#endif
+ {
+ /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+ __u64 lo, hi;
+
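+		/*
+		 * ldlo/ldhi each fetch the part of the misaligned datum
+		 * that lies in the aligned word below/above it, with the
+		 * other byte lanes zeroed, so OR-ing the two results
+		 * reassembles the full value.
+		 */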
+ switch (width_shift) {
+ case 1:
+ misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
+ break;
+ case 2:
+ asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
+ asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
+ regs->regs[destreg] = lo | hi;
+ break;
+ case 3:
+ asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
+ asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
+ regs->regs[destreg] = lo | hi;
+ break;
+
+ default:
+ printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ }
+
+ return 0;
+
+}
+
+static int misaligned_store(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int srcreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ srcreg = (opcode >> 4) & 0x3f;
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+ if (user_mode(regs)) {
+ __u64 buffer;
+
+ if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ switch (width_shift) {
+ case 1:
+ *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
+ break;
+ case 2:
+ *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
+ break;
+ case 3:
+ buffer = regs->regs[srcreg];
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+
+ if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ } else
+#endif
+ {
+ /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+ __u64 val = regs->regs[srcreg];
+
+ switch (width_shift) {
+ case 1:
+ misaligned_kernel_word_store(address, val);
+ break;
+ case 2:
+ asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
+ asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
+ break;
+ case 3:
+ asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
+ asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
+ break;
+
+ default:
+ printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ }
+
+ return 0;
+
+}
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
+ error. */
+static int misaligned_fpu_load(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ int do_paired_load)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int destreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ destreg = (opcode >> 4) & 0x3f;
+ if (user_mode(regs)) {
+ __u64 buffer;
+ __u32 buflo, bufhi;
+
+ if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ /* 'current' may be the current owner of the FPU state, so
+ context switch the registers into memory so they can be
+ indexed by register number. */
+ if (last_task_used_math == current) {
+ enable_fpu();
+ save_fpu(current, regs);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ buflo = *(__u32*) &buffer;
+ bufhi = *(1 + (__u32*) &buffer);
+
+ switch (width_shift) {
+ case 2:
+ current->thread.fpu.hard.fp_regs[destreg] = buflo;
+ break;
+ case 3:
+ if (do_paired_load) {
+ current->thread.fpu.hard.fp_regs[destreg] = buflo;
+ current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+ } else {
+#if defined(CONFIG_LITTLE_ENDIAN)
+ current->thread.fpu.hard.fp_regs[destreg] = bufhi;
+ current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+#else
+ current->thread.fpu.hard.fp_regs[destreg] = buflo;
+ current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+#endif
+ }
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ return 0;
+ } else {
+ die ("Misaligned FPU load inside kernel", regs, 0);
+ return -1;
+ }
+
+
+}
+
+static int misaligned_fpu_store(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ int do_paired_load)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int srcreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ srcreg = (opcode >> 4) & 0x3f;
+ if (user_mode(regs)) {
+ __u64 buffer;
+ /* Initialise these to NaNs. */
+ __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
+
+ if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ /* 'current' may be the current owner of the FPU state, so
+ context switch the registers into memory so they can be
+ indexed by register number. */
+ if (last_task_used_math == current) {
+ enable_fpu();
+ save_fpu(current, regs);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ switch (width_shift) {
+ case 2:
+ buflo = current->thread.fpu.hard.fp_regs[srcreg];
+ break;
+ case 3:
+ if (do_paired_load) {
+ buflo = current->thread.fpu.hard.fp_regs[srcreg];
+ bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+ } else {
+#if defined(CONFIG_LITTLE_ENDIAN)
+ bufhi = current->thread.fpu.hard.fp_regs[srcreg];
+ buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+#else
+ buflo = current->thread.fpu.hard.fp_regs[srcreg];
+ bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+#endif
+ }
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+
+ *(__u32*) &buffer = buflo;
+ *(1 + (__u32*) &buffer) = bufhi;
+ if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ return 0;
+ } else {
+ die ("Misaligned FPU load inside kernel", regs, 0);
+ return -1;
+ }
+}
+#endif
+
+static int misaligned_fixup(struct pt_regs *regs)
+{
+ unsigned long opcode;
+ int error;
+ int major, minor;
+
+#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+ /* Never fixup user mode misaligned accesses without this option enabled. */
+ return -1;
+#else
+ if (!user_mode_unaligned_fixup_enable) return -1;
+#endif
+
+ error = read_opcode(regs->pc, &opcode, user_mode(regs));
+ if (error < 0) {
+ return error;
+ }
+ major = (opcode >> 26) & 0x3f;
+ minor = (opcode >> 16) & 0xf;
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+ if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
+ --user_mode_unaligned_fixup_count;
+ /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
+ printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+ current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+ } else
+#endif
+ if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
+ --kernel_mode_unaligned_fixup_count;
+ if (in_interrupt()) {
+ printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
+ (__u32)regs->pc, opcode);
+ } else {
+ printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+ current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+ }
+ }
+
+
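+	/*
+	 * The case labels below are written as (top_byte >> 2): the major
+	 * field is opcode bits 31:26, so e.g. an LD.W whose leading byte is
+	 * 0x84 has major == 0x84 >> 2 == 0x21.
+	 */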
+ switch (major) {
+ case (0x84>>2): /* LD.W */
+ error = misaligned_load(regs, opcode, 1, 1, 1);
+ break;
+ case (0xb0>>2): /* LD.UW */
+ error = misaligned_load(regs, opcode, 1, 1, 0);
+ break;
+ case (0x88>>2): /* LD.L */
+ error = misaligned_load(regs, opcode, 1, 2, 1);
+ break;
+ case (0x8c>>2): /* LD.Q */
+ error = misaligned_load(regs, opcode, 1, 3, 0);
+ break;
+
+ case (0xa4>>2): /* ST.W */
+ error = misaligned_store(regs, opcode, 1, 1);
+ break;
+ case (0xa8>>2): /* ST.L */
+ error = misaligned_store(regs, opcode, 1, 2);
+ break;
+ case (0xac>>2): /* ST.Q */
+ error = misaligned_store(regs, opcode, 1, 3);
+ break;
+
+ case (0x40>>2): /* indexed loads */
+ switch (minor) {
+ case 0x1: /* LDX.W */
+ error = misaligned_load(regs, opcode, 0, 1, 1);
+ break;
+ case 0x5: /* LDX.UW */
+ error = misaligned_load(regs, opcode, 0, 1, 0);
+ break;
+ case 0x2: /* LDX.L */
+ error = misaligned_load(regs, opcode, 0, 2, 1);
+ break;
+ case 0x3: /* LDX.Q */
+ error = misaligned_load(regs, opcode, 0, 3, 0);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+
+ case (0x60>>2): /* indexed stores */
+ switch (minor) {
+ case 0x1: /* STX.W */
+ error = misaligned_store(regs, opcode, 0, 1);
+ break;
+ case 0x2: /* STX.L */
+ error = misaligned_store(regs, opcode, 0, 2);
+ break;
+ case 0x3: /* STX.Q */
+ error = misaligned_store(regs, opcode, 0, 3);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+ case (0x94>>2): /* FLD.S */
+ error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
+ break;
+ case (0x98>>2): /* FLD.P */
+ error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
+ break;
+ case (0x9c>>2): /* FLD.D */
+ error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
+ break;
+ case (0x1c>>2): /* floating indexed loads */
+ switch (minor) {
+ case 0x8: /* FLDX.S */
+ error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
+ break;
+ case 0xd: /* FLDX.P */
+ error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
+ break;
+ case 0x9: /* FLDX.D */
+ error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+	case (0xb4>>2): /* FST.S */
+ error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
+ break;
+	case (0xb8>>2): /* FST.P */
+ error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
+ break;
+	case (0xbc>>2): /* FST.D */
+ error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
+ break;
+ case (0x3c>>2): /* floating indexed stores */
+ switch (minor) {
+ case 0x8: /* FSTX.S */
+ error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
+ break;
+ case 0xd: /* FSTX.P */
+ error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
+ break;
+ case 0x9: /* FSTX.D */
+ error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+#endif
+
+ default:
+ /* Fault */
+ error = -1;
+ break;
+ }
+
+ if (error < 0) {
+ return error;
+ } else {
+ regs->pc += 4; /* Skip the instruction that's just been emulated */
+ return 0;
+ }
+
+}
+
+static ctl_table unaligned_table[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "kernel_reports",
+ .data = &kernel_mode_unaligned_fixup_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "user_reports",
+ .data = &user_mode_unaligned_fixup_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "user_enable",
+ .data = &user_mode_unaligned_fixup_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+		.proc_handler = &proc_dointvec
+	},
+#endif
+ {}
+};
+
+static ctl_table unaligned_root[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "unaligned_fixup",
+ .mode = 0555,
+		.child = unaligned_table
+ },
+ {}
+};
+
+static ctl_table sh64_root[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sh64",
+ .mode = 0555,
+ .child = unaligned_root
+ },
+ {}
+};
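+
+/*
+ * With the nesting above, the knobs appear as e.g.
+ * /proc/sys/sh64/unaligned_fixup/kernel_reports (plus user_reports and
+ * user_enable when user-mode fixup is configured in).
+ */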
+static struct ctl_table_header *sysctl_header;
+static int __init init_sysctl(void)
+{
+ sysctl_header = register_sysctl_table(sh64_root);
+ return 0;
+}
+
+__initcall(init_sysctl);
+
+
+asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
+{
+ u64 peek_real_address_q(u64 addr);
+ u64 poke_real_address_q(u64 addr, u64 val);
+ unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
+ unsigned long long exp_cause;
+ /* It's not worth ioremapping the debug module registers for the amount
+ of access we make to them - just go direct to their physical
+ addresses. */
+ exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
+ if (exp_cause & ~4) {
+ printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
+ (unsigned long)(exp_cause & 0xffffffff));
+ }
+ show_state();
+ /* Clear all DEBUGINT causes */
+ poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
+}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0956fb3681a..d7d4991f32a 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -1,138 +1,5 @@
-/*
- * ld script to make SuperH Linux kernel
- * Written by Niibe Yutaka
- */
-#include <asm/thread_info.h>
-#include <asm/cache.h>
-#include <asm-generic/vmlinux.lds.h>
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
+#ifdef CONFIG_SUPERH32
+# include "vmlinux_32.lds.S"
#else
-OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
+# include "vmlinux_64.lds.S"
#endif
-OUTPUT_ARCH(sh)
-ENTRY(_start)
-SECTIONS
-{
- . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
- _text = .; /* Text and read-only data */
-
- .empty_zero_page : {
- *(.empty_zero_page)
- } = 0
-
- .text : {
- *(.text.head)
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.fixup)
- *(.gnu.warning)
- } = 0x0009
-
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
- _etext = .; /* End of text section */
-
- BUG_TABLE
- NOTES
- RO_DATA(PAGE_SIZE)
-
- . = ALIGN(THREAD_SIZE);
- .data : { /* Data */
- *(.data.init_task)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.cacheline_aligned)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.read_mostly)
-
- . = ALIGN(PAGE_SIZE);
- *(.data.page_aligned)
-
- __nosave_begin = .;
- *(.data.nosave)
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- DATA_DATA
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- . = ALIGN(PAGE_SIZE); /* Init code and data */
- __init_begin = .;
- _sinittext = .;
- .init.text : { *(.init.text) }
- _einittext = .;
- .init.data : { *(.init.data) }
-
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
-
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
-
- SECURITY_INIT
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
-
- . = ALIGN(4);
- __machvec_start = .;
- .machvec.init : { *(.machvec.init) }
- __machvec_end = .;
-
- PERCPU(PAGE_SIZE)
-
- /*
- * .exit.text is discarded at runtime, not link time, to deal with
- * references from __bug_table
- */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
-
- . = ALIGN(PAGE_SIZE);
- .bss : {
- __init_end = .;
- __bss_start = .; /* BSS */
- *(.bss.page_aligned)
- *(.bss)
- *(COMMON)
- . = ALIGN(4);
- _ebss = .; /* uClinux MTD sucks */
- _end = . ;
- }
-
- /*
- * When something in the kernel is NOT compiled as a module, the
- * module cleanup code and data are put into these segments. Both
- * can then be thrown away, as cleanup code is never called unless
- * it's a module.
- */
- /DISCARD/ : {
- *(.exitcall.exit)
- }
-
- STABS_DEBUG
- DWARF_DEBUG
-}
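
The rewrite above reduces vmlinux.lds.S to a build-time dispatch, the same idiom this series applies to head_32.S/head_64.S, traps_32.c/traps_64.c, and friends: keep the historical path, move the contents into width-specific files. Sketched with hypothetical header names:

/* Dispatch-wrapper idiom; impl_32.h/impl_64.h are hypothetical. */
#ifdef CONFIG_SUPERH32
# include "impl_32.h"		/* SHcompact (32-bit) variant */
#else
# include "impl_64.h"		/* SHmedia (SH-5) variant */
#endif

The wrapper keeps every Makefile rule and #include that referenced the old path working unchanged while the two variants diverge freely.
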
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
new file mode 100644
index 00000000000..c7113786ecd
--- /dev/null
+++ b/arch/sh/kernel/vmlinux_32.lds.S
@@ -0,0 +1,152 @@
+/*
+ * ld script to make SuperH Linux kernel
+ * Written by Niibe Yutaka
+ */
+#include <asm/thread_info.h>
+#include <asm/cache.h>
+#include <asm-generic/vmlinux.lds.h>
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
+#else
+OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
+#endif
+OUTPUT_ARCH(sh)
+ENTRY(_start)
+SECTIONS
+{
+#ifdef CONFIG_32BIT
+ . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+#else
+ . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
+#endif
+
+ _text = .; /* Text and read-only data */
+
+ .empty_zero_page : {
+ *(.empty_zero_page)
+ } = 0
+
+ .text : {
+ *(.text.head)
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } = 0x0009
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ _etext = .; /* End of text section */
+
+ BUG_TABLE
+ NOTES
+ RO_DATA(PAGE_SIZE)
+
+ /*
+ * Code which must be executed uncached and the associated data
+ */
+ . = ALIGN(PAGE_SIZE);
+ __uncached_start = .;
+ .uncached.text : { *(.uncached.text) }
+ .uncached.data : { *(.uncached.data) }
+ __uncached_end = .;
+
+ . = ALIGN(THREAD_SIZE);
+ .data : { /* Data */
+ *(.data.init_task)
+
+ . = ALIGN(L1_CACHE_BYTES);
+ *(.data.cacheline_aligned)
+
+ . = ALIGN(L1_CACHE_BYTES);
+ *(.data.read_mostly)
+
+ . = ALIGN(PAGE_SIZE);
+ *(.data.page_aligned)
+
+ __nosave_begin = .;
+ *(.data.nosave)
+ . = ALIGN(PAGE_SIZE);
+ __nosave_end = .;
+
+ DATA_DATA
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ _sinittext = .;
+ .init.text : { INIT_TEXT }
+ _einittext = .;
+ .init.data : { INIT_DATA }
+
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+
+ __initcall_start = .;
+ .initcall.init : {
+ INITCALLS
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : { *(.con_initcall.init) }
+ __con_initcall_end = .;
+
+ SECURITY_INIT
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ . = ALIGN(PAGE_SIZE);
+ __initramfs_start = .;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+#endif
+
+ . = ALIGN(4);
+ __machvec_start = .;
+ .machvec.init : { *(.machvec.init) }
+ __machvec_end = .;
+
+ PERCPU(PAGE_SIZE)
+
+ /*
+ * .exit.text is discarded at runtime, not link time, to deal with
+ * references from __bug_table
+ */
+ .exit.text : { EXIT_TEXT }
+ .exit.data : { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ .bss : {
+ __init_end = .;
+ __bss_start = .; /* BSS */
+ *(.bss.page_aligned)
+ *(.bss)
+ *(COMMON)
+ . = ALIGN(4);
+ _ebss = .; /* uClinux MTD sucks */
+ _end = . ;
+ }
+
+ /*
+ * When something in the kernel is NOT compiled as a module, the
+ * module cleanup code and data are put into these segments. Both
+ * can then be thrown away, as cleanup code is never called unless
+ * it's a module.
+ */
+ /DISCARD/ : {
+ *(.exitcall.exit)
+ }
+
+ STABS_DEBUG
+ DWARF_DEBUG
+}
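
One addition unique to the 32-bit script is the __uncached_start/__uncached_end block, which gathers code and data that must be reached through an uncached mapping, typically cache-control code that must not fetch through the very cache it is reprogramming. A sketch of how a routine might be steered into that section; the function and its body are hypothetical, only the section name comes from the script above:

/* Hypothetical: place a routine in .uncached.text so it executes from
 * the uncached alias while it manipulates cache-control registers. */
static void __attribute__((__section__(".uncached.text")))
reprogram_cache(void)
{
	/* Instruction fetches for this function bypass the cache, so
	 * flipping cache-control bits here cannot invalidate the code
	 * doing the flipping. */
}
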
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
new file mode 100644
index 00000000000..3f1bd6392bb
--- /dev/null
+++ b/arch/sh/kernel/vmlinux_64.lds.S
@@ -0,0 +1,164 @@
+/*
+ * ld script to make SH64 Linux kernel
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * benedict.gaster@superh.com: 2nd May 2002
+ * Add definition of empty_zero_page to be the first page of kernel image.
+ *
+ * benedict.gaster@superh.com: 3rd May 2002
+ * Added support for ramdisk, removing statically linked romfs at the
+ * same time.
+ *
+ * lethal@linux-sh.org: 9th May 2003
+ * Kill off GLOBAL_NAME() usage and other CDC-isms.
+ *
+ * lethal@linux-sh.org: 19th May 2003
+ * Remove support for ancient toolchains.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+
+#define LOAD_OFFSET CONFIG_PAGE_OFFSET
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_ARCH(sh:sh5)
+
+#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
+
+ENTRY(__start)
+SECTIONS
+{
+ . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
+ _text = .; /* Text and read-only data */
+
+ .empty_zero_page : C_PHYS(.empty_zero_page) {
+ *(.empty_zero_page)
+ } = 0
+
+ .text : C_PHYS(.text) {
+ *(.text.head)
+ TEXT_TEXT
+ *(.text64)
+ *(.text..SHmedia32)
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ } = 0x6ff0fff0
+#else
+ } = 0xf0fff06f
+#endif
+
+ /* We likely want __ex_table to be Cache Line aligned */
+ . = ALIGN(L1_CACHE_BYTES); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : C_PHYS(__ex_table) { *(__ex_table) }
+ __stop___ex_table = .;
+
+ _etext = .; /* End of text section */
+
+ BUG_TABLE
+ NOTES
+ RO_DATA(PAGE_SIZE)
+
+ . = ALIGN(THREAD_SIZE);
+ .data : C_PHYS(.data) { /* Data */
+ *(.data.init_task)
+
+ . = ALIGN(L1_CACHE_BYTES);
+ *(.data.cacheline_aligned)
+
+ . = ALIGN(L1_CACHE_BYTES);
+ *(.data.read_mostly)
+
+ . = ALIGN(PAGE_SIZE);
+ *(.data.page_aligned)
+
+ __nosave_begin = .;
+ *(.data.nosave)
+ . = ALIGN(PAGE_SIZE);
+ __nosave_end = .;
+
+ DATA_DATA
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ _sinittext = .;
+ .init.text : C_PHYS(.init.text) { INIT_TEXT }
+ _einittext = .;
+ .init.data : C_PHYS(.init.data) { INIT_DATA }
+ . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */
+ __setup_start = .;
+ .init.setup : C_PHYS(.init.setup) { *(.init.setup) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : C_PHYS(.initcall.init) {
+ INITCALLS
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : C_PHYS(.con_initcall.init) {
+ *(.con_initcall.init)
+ }
+ __con_initcall_end = .;
+
+ SECURITY_INIT
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ . = ALIGN(PAGE_SIZE);
+ __initramfs_start = .;
+ .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
+ __initramfs_end = .;
+#endif
+
+ . = ALIGN(8);
+ __machvec_start = .;
+ .machvec.init : C_PHYS(.machvec.init) { *(.machvec.init) }
+ __machvec_end = .;
+
+ PERCPU(PAGE_SIZE)
+
+ /*
+ * .exit.text is discarded at runtime, not link time, to deal with
+ * references from __bug_table
+ */
+ .exit.text : C_PHYS(.exit.text) { EXIT_TEXT }
+ .exit.data : C_PHYS(.exit.data) { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ .bss : C_PHYS(.bss) {
+ __init_end = .;
+ __bss_start = .; /* BSS */
+ *(.bss.page_aligned)
+ *(.bss)
+ *(COMMON)
+ . = ALIGN(4);
+ _ebss = .; /* uClinux MTD sucks */
+ _end = . ;
+ }
+
+ /*
+ * When something in the kernel is NOT compiled as a module, the
+ * module cleanup code and data are put into these segments. Both
+ * can then be thrown away, as cleanup code is never called unless
+ * it's a module.
+ */
+ /DISCARD/ : {
+ *(.exitcall.exit)
+ }
+
+ STABS_DEBUG
+ DWARF_DEBUG
+}
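
Unlike the 32-bit script, the 64-bit one gives every output section an explicit load address through C_PHYS(x), which expands to AT(ADDR(x) - LOAD_OFFSET): the load (physical) address is simply the virtual address with the page offset stripped. A standalone worked example of that arithmetic; the two input values are illustrative assumptions, not taken from a real board:

#include <stdio.h>

/* LMA = VMA - LOAD_OFFSET, as computed by C_PHYS() above. */
int main(void)
{
	unsigned long load_offset = 0x80000000UL;	/* LOAD_OFFSET      */
	unsigned long vma         = 0x8c001000UL;	/* ADDR(.text), say */
	unsigned long lma         = vma - load_offset;	/* the AT() address */

	printf(".text: VMA 0x%08lx -> LMA 0x%08lx\n", vma, lma);
	return 0;
}
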