Diffstat (limited to 'arch/sh/kernel/cpu')
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 6
-rw-r--r--  arch/sh/kernel/cpu/init.c | 74
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile | 4
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c | 257
-rw-r--r--  arch/sh/kernel/cpu/irq/intc.c | 31
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S | 19
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh2a/Makefile | 4
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c | 89
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c | 633
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c | 22
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 319
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7712.c | 71
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 24
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 9
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c | 11
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c | 16
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7720.c | 78
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 537
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 18
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 13
-rw-r--r--  arch/sh/kernel/cpu/sh4/softfloat.c | 892
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c | 13
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7763.c | 126
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 390
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 13
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 19
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c | 14
-rw-r--r--  arch/sh/kernel/cpu/sh5/Makefile | 7
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 2101
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c | 166
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c | 76
-rw-r--r--  arch/sh/kernel/cpu/sh5/switchto.S | 198
-rw-r--r--  arch/sh/kernel/cpu/sh5/unwind.c | 326
42 files changed, 6236 insertions(+), 374 deletions(-)
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d055a3ea6b4..f471d242774 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -6,8 +6,14 @@ obj-$(CONFIG_CPU_SH2) = sh2/
obj-$(CONFIG_CPU_SH2A) = sh2a/
obj-$(CONFIG_CPU_SH3) = sh3/
obj-$(CONFIG_CPU_SH4) = sh4/
+obj-$(CONFIG_CPU_SH5) = sh5/
+
+# Special cases for family ancestry.
+
obj-$(CONFIG_CPU_SH4A) += sh4a/
+# Common interfaces.
+
obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index c217c4bf008..80a31329ead 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -20,9 +21,12 @@
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
+#include <asm/elf.h>
#include <asm/io.h>
-#include <asm/ubc.h>
#include <asm/smp.h>
+#ifdef CONFIG_SUPERH32
+#include <asm/ubc.h>
+#endif
/*
* Generic wrapper for command line arguments to disable on-chip
@@ -61,25 +65,12 @@ static void __init speculative_execution_init(void)
/*
* Generic first-level cache init
*/
-static void __init cache_init(void)
+#ifdef CONFIG_SUPERH32
+static void __uses_jump_to_uncached cache_init(void)
{
unsigned long ccr, flags;
- /* First setup the rest of the I-cache info */
- current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
- current_cpu_data.icache.linesz;
-
- current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
- current_cpu_data.icache.linesz;
-
- /* And the D-cache too */
- current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
- current_cpu_data.dcache.linesz;
-
- current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
- current_cpu_data.dcache.linesz;
-
- jump_to_P2();
+ jump_to_uncached();
ccr = ctrl_inl(CCR);
/*
@@ -156,7 +147,31 @@ static void __init cache_init(void)
#endif
ctrl_outl(flags, CCR);
- back_to_P1();
+ back_to_cached();
+}
+#else
+#define cache_init() do { } while (0)
+#endif
+
+#define CSHAPE(totalsize, linesize, assoc) \
+ ((totalsize & ~0xff) | (linesize << 4) | assoc)
+
+#define CACHE_DESC_SHAPE(desc) \
+ CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
+
+static void detect_cache_shape(void)
+{
+ l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
+
+ if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
+ l1i_cache_shape = l1d_cache_shape;
+ else
+ l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
+
+ if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
+ l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
+ else
+ l2_cache_shape = -1; /* No S-cache */
}
#ifdef CONFIG_SH_DSP
@@ -228,14 +243,32 @@ asmlinkage void __cpuinit sh_cpu_init(void)
if (current_cpu_data.type == CPU_SH_NONE)
panic("Unknown CPU");
+ /* First setup the rest of the I-cache info */
+ current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
+ current_cpu_data.icache.linesz;
+
+ current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
+ current_cpu_data.icache.linesz;
+
+ /* And the D-cache too */
+ current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
+ current_cpu_data.dcache.linesz;
+
+ current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
+ current_cpu_data.dcache.linesz;
+
/* Init the cache */
cache_init();
- if (raw_smp_processor_id() == 0)
+ if (raw_smp_processor_id() == 0) {
shm_align_mask = max_t(unsigned long,
current_cpu_data.dcache.way_size - 1,
PAGE_SIZE - 1);
+ /* Boot CPU sets the cache shape */
+ detect_cache_shape();
+ }
+
/* Disable the FPU */
if (fpu_disabled) {
printk("FPU Disabled\n");
@@ -273,7 +306,10 @@ asmlinkage void __cpuinit sh_cpu_init(void)
* like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
* we wake it up and hope that all is well.
*/
+#ifdef CONFIG_SUPERH32
if (raw_smp_processor_id() == 0)
ubc_wakeup();
+#endif
+
speculative_execution_init();
}
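
A note on the cache-shape encoding introduced above: CSHAPE() packs a
cache's total size, log2 of its line size, and its associativity into a
single word, presumably for export through the ELF auxiliary vector
(note the new <asm/elf.h> include). A minimal standalone sketch, using
hypothetical geometry for a 4-way, 32-byte-line, 256-set cache:

#include <stdio.h>

#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

int main(void)
{
	unsigned int ways = 4, linesz = 32, sets = 256;
	unsigned int way_size = sets * linesz;		/* 8KiB per way */
	unsigned int shape = CSHAPE(way_size * ways,	/* 32KiB total */
				    5,			/* ilog2(32) */
				    ways);

	printf("cache shape = 0x%08x\n", shape);	/* 0x00008054 */
	return 0;
}
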
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 8da8e178f09..cc1836e47a5 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,7 +1,9 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
-obj-y += imask.o intc.o
+obj-y += intc.o
+obj-$(CONFIG_SUPERH32) += imask.o
+obj-$(CONFIG_CPU_SH5) += intc-sh5.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
new file mode 100644
index 00000000000..43ee7a9a4f0
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -0,0 +1,257 @@
+/*
+ * arch/sh/kernel/cpu/irq/intc-sh5.c
+ *
+ * Interrupt Controller support for SH5 INTC.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * Per-interrupt selective priority support. IRLM=0 (fixed
+ * priority) is not supported, since it would be useless without
+ * a cascaded interrupt controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/cpu/irq.h>
+#include <asm/page.h>
+
+/*
+ * Maybe the generic Peripheral block could move to a more
+ * generic include file. INTC Block will be defined here
+ * and only here to make INTC self-contained in a single
+ * file.
+ */
+#define INTC_BLOCK_OFFSET 0x01000000
+
+/* Base */
+#define INTC_BASE (PHYS_PERIPHERAL_BLOCK + \
+ INTC_BLOCK_OFFSET)
+
+/* Address */
+#define INTC_ICR_SET (intc_virt + 0x0)
+#define INTC_ICR_CLEAR (intc_virt + 0x8)
+#define INTC_INTPRI_0 (intc_virt + 0x10)
+#define INTC_INTSRC_0 (intc_virt + 0x50)
+#define INTC_INTSRC_1 (intc_virt + 0x58)
+#define INTC_INTREQ_0 (intc_virt + 0x60)
+#define INTC_INTREQ_1 (intc_virt + 0x68)
+#define INTC_INTENB_0 (intc_virt + 0x70)
+#define INTC_INTENB_1 (intc_virt + 0x78)
+#define INTC_INTDSB_0 (intc_virt + 0x80)
+#define INTC_INTDSB_1 (intc_virt + 0x88)
+
+#define INTC_ICR_IRLM 0x1
+#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
+#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
+
+
+/*
+ * Mapper between the vector ordinal and the IRQ number
+ * passed to kernel/device drivers.
+ */
+int intc_evt_to_irq[(0xE20/0x20)+1] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
+ 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
+ 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
+ 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
+ -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
+ -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
+ 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
+ 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
+ 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
+ -1, -1 /* 0xE00 - 0xE20 */
+};
+
+/*
+ * Opposite mapper.
+ */
+static int IRQ_to_vectorN[NR_INTC_IRQS] = {
+ 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
+ -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
+ 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
+ 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
+ -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
+
+};
+
+static unsigned long intc_virt;
+
+static unsigned int startup_intc_irq(unsigned int irq);
+static void shutdown_intc_irq(unsigned int irq);
+static void enable_intc_irq(unsigned int irq);
+static void disable_intc_irq(unsigned int irq);
+static void mask_and_ack_intc(unsigned int);
+static void end_intc_irq(unsigned int irq);
+
+static struct hw_interrupt_type intc_irq_type = {
+ .typename = "INTC",
+ .startup = startup_intc_irq,
+ .shutdown = shutdown_intc_irq,
+ .enable = enable_intc_irq,
+ .disable = disable_intc_irq,
+ .ack = mask_and_ack_intc,
+ .end = end_intc_irq
+};
+
+static int irlm; /* IRL mode */
+
+static unsigned int startup_intc_irq(unsigned int irq)
+{
+ enable_intc_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void shutdown_intc_irq(unsigned int irq)
+{
+ disable_intc_irq(irq);
+}
+
+static void enable_intc_irq(unsigned int irq)
+{
+ unsigned long reg;
+ unsigned long bitmask;
+
+ if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
+ printk("Trying to use straight IRL0-3 with an encoding platform.\n");
+
+ if (irq < 32) {
+ reg = INTC_INTENB_0;
+ bitmask = 1 << irq;
+ } else {
+ reg = INTC_INTENB_1;
+ bitmask = 1 << (irq - 32);
+ }
+
+ ctrl_outl(bitmask, reg);
+}
+
+static void disable_intc_irq(unsigned int irq)
+{
+ unsigned long reg;
+ unsigned long bitmask;
+
+ if (irq < 32) {
+ reg = INTC_INTDSB_0;
+ bitmask = 1 << irq;
+ } else {
+ reg = INTC_INTDSB_1;
+ bitmask = 1 << (irq - 32);
+ }
+
+ ctrl_outl(bitmask, reg);
+}
+
+static void mask_and_ack_intc(unsigned int irq)
+{
+ disable_intc_irq(irq);
+}
+
+static void end_intc_irq(unsigned int irq)
+{
+ enable_intc_irq(irq);
+}
+
+/* For future use, if we ever support IRLM=0 */
+void make_intc_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_desc[irq].chip = &intc_irq_type;
+ disable_intc_irq(irq);
+}
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+int intc_irq_describe(char* p, int irq)
+{
+ if (irq < NR_INTC_IRQS)
+ return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
+ else
+ return 0;
+}
+#endif
+
+void __init plat_irq_setup(void)
+{
+ unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
+ unsigned long reg;
+ unsigned long data;
+ int i;
+
+ intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
+ if (!intc_virt) {
+ panic("Unable to remap INTC\n");
+ }
+
+
+ /* Set default: per-line enable/disable, priority driven ack/eoi */
+ for (i = 0; i < NR_INTC_IRQS; i++) {
+ if (platform_int_priority[i] != NO_PRIORITY) {
+ irq_desc[i].chip = &intc_irq_type;
+ }
+ }
+
+
+ /* Disable all interrupts and set all priorities to 0 to avoid trouble */
+ ctrl_outl(-1, INTC_INTDSB_0);
+ ctrl_outl(-1, INTC_INTDSB_1);
+
+ for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
+ ctrl_outl( NO_PRIORITY, reg);
+
+
+ /* Set IRLM */
+ /* If all the priorities are set to 'no priority', then
+ * assume we are using encoded mode.
+ */
+ irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
+ platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
+
+ if (irlm == NO_PRIORITY) {
+ /* IRLM = 0 */
+ reg = INTC_ICR_CLEAR;
+ i = IRQ_INTA;
+ printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
+ } else {
+ /* IRLM = 1 */
+ reg = INTC_ICR_SET;
+ i = IRQ_IRL0;
+ }
+ ctrl_outl(INTC_ICR_IRLM, reg);
+
+ /* Set interrupt priorities according to platform description */
+ for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
+ data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
+ if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
+ /* Every eighth entry, write out the priority register */
+ ctrl_outl(data, reg);
+ data = 0;
+ reg += 8;
+ }
+ }
+
+ /*
+ * And now let interrupts come in.
+ * sti() is not enough, we need to
+ * lower priority, too.
+ */
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy0)
+ : "r" (__dummy1));
+}
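
For clarity, the INTPRI programming loop at the end of plat_irq_setup()
packs eight 4-bit priority fields into each 64-bit-spaced register. A
self-contained sketch of the same packing, with made-up priority values:

#include <stdio.h>

#define NR_IRQS		16
#define PRIOS_PER_REG	8	/* INTC_INTPRI_PPREG */

int main(void)
{
	unsigned int prio[NR_IRQS] = {
		3, 3, 0, 0, 7, 7, 7, 7,		/* first register */
		2, 2, 2, 2, 0, 0, 0, 1,		/* second register */
	};
	unsigned long data = 0;
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		data |= (unsigned long)prio[i] << ((i % PRIOS_PER_REG) * 4);
		if ((i % PRIOS_PER_REG) == PRIOS_PER_REG - 1) {
			printf("INTPRI[%d] = 0x%08lx\n",
			       i / PRIOS_PER_REG, data);
			data = 0;
		}
	}
	return 0;	/* prints 0x77770033 and 0x10002222 */
}
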
diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c
index 6ac018c15e0..84806b2027f 100644
--- a/arch/sh/kernel/cpu/irq/intc.c
+++ b/arch/sh/kernel/cpu/irq/intc.c
@@ -335,31 +335,6 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
return 0;
}
-static unsigned int __init intc_prio_value(struct intc_desc *desc,
- intc_enum enum_id, int do_grps)
-{
- struct intc_prio *p = desc->priorities;
- unsigned int i;
-
- for (i = 0; p && enum_id && i < desc->nr_priorities; i++) {
- p = desc->priorities + i;
-
- if (p->enum_id != enum_id)
- continue;
-
- return p->priority;
- }
-
- if (do_grps)
- return intc_prio_value(desc, intc_grp_id(desc, enum_id), 0);
-
- /* default to the lowest priority possible if no priority is set
- * - this needs to be at least 2 for 5-bit priorities on 7780
- */
-
- return 2;
-}
-
static unsigned int __init intc_mask_data(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id, int do_grps)
@@ -518,8 +493,10 @@ static void __init intc_register_irq(struct intc_desc *desc,
handle_level_irq, "level");
set_irq_chip_data(irq, (void *)data[primary]);
- /* record the desired priority level */
- intc_prio_level[irq] = intc_prio_value(desc, enum_id, 1);
+ /* set priority level
+ * - this needs to be at least 2 for 5-bit priorities on 7780
+ */
+ intc_prio_level[irq] = 2;
/* enable secondary masking method if present */
if (data[!primary])
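
The upshot of this change: intc_prio_value() and the per-SoC struct
intc_prio tables are gone, every vector now starts at priority level 2,
and DECLARE_INTC_DESC() drops its priorities argument. The setup-*.c
hunks below all follow mechanically. Roughly, a controller description
now reads (table names are per-SoC, shown here generically):

static DECLARE_INTC_DESC(intc_desc, "some-soc", vectors, groups,
			 mask_registers, prio_registers, NULL);

void __init plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}
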
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index ee8f1fe84b0..7a26569e795 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -149,6 +149,14 @@ ENTRY(exception_handler)
mov #32,r8
cmp/hs r8,r9
bt trap_entry ! 64 > vec >= 32 is trap
+
+#if defined(CONFIG_SH_FPU)
+ mov #13,r8
+ cmp/eq r8,r9
+ bt 10f ! fpu
+ nop
+#endif
+
mov.l 4f,r8
mov r9,r4
shll2 r9
@@ -158,6 +166,10 @@ ENTRY(exception_handler)
cmp/eq r9,r8
bf 3f
mov.l 8f,r8 ! unhandled exception
+#if defined(CONFIG_SH_FPU)
+10:
+ mov.l 9f, r8 ! unhandled exception
+#endif
3:
mov.l 5f,r10
jmp @r8
@@ -177,7 +189,10 @@ interrupt_entry:
6: .long ret_from_irq
7: .long do_IRQ
8: .long do_exception_error
-
+#ifdef CONFIG_SH_FPU
+9: .long fpu_error_trap_handler
+#endif
+
trap_entry:
mov #0x30,r8
cmp/ge r8,r9 ! vector 0x20-0x2f is systemcall
@@ -250,7 +265,7 @@ ENTRY(sh_bios_handler)
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
-ENTRY(address_error_handler)
+ENTRY(address_error_trap_handler)
mov r15,r4 ! regs
add #4,r4
mov #OFF_PC,r0
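
In pseudo-C (not the actual assembler), the dispatch added above does
the following; lookup_exception_handler() is a hypothetical stand-in
for the table indexing done with r8/r9:

/* vec is the hardware vector number */
if (vec >= 32 && vec < 64)
	goto trap_entry;			/* traps / syscalls */
#if defined(CONFIG_SH_FPU)
if (vec == 13)
	handler = fpu_error_trap_handler;	/* FPU error */
else
#endif
	handler = lookup_exception_handler(vec); /* or do_exception_error */
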
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index ec6adc3f306..b230eb278ce 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -65,7 +65,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, groups,
- NULL, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index 965fa2572b2..b279cdc3a23 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -6,4 +6,8 @@ obj-y := common.o probe.o opcode_helper.o
common-y += $(addprefix ../sh2/, ex.o entry.o)
+obj-$(CONFIG_SH_FPU) += fpu.o
+
obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
new file mode 100644
index 00000000000..3feb95a4fcb
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -0,0 +1,89 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+ *
+ * SH7203 support for the clock framework
+ *
+ * Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd)
+ *
+ * Based on clock-sh7263.c
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 8, 12, 16, 0 };
+static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
+#define ifc_divisors pfc_divisors
+
+#if (CONFIG_SH_CLK_MD == 0)
+#define PLL2 (1)
+#elif (CONFIG_SH_CLK_MD == 1)
+#define PLL2 (2)
+#elif (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 3)
+#define PLL2 (4)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2;
+}
+
+static struct clk_ops sh7203_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inw(FREQCR) & 0x0007);
+ clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7203_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inw(FREQCR) & 0x0007);
+ clk->rate = clk->parent->rate / pfc_divisors[idx-2];
+}
+
+static struct clk_ops sh7203_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7203_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7203_clk_ops[] = {
+ &sh7203_master_clk_ops,
+ &sh7203_module_clk_ops,
+ &sh7203_bus_clk_ops,
+ &sh7203_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7203_clk_ops))
+ *ops = sh7203_clk_ops[idx];
+}
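
As a sanity check on the ops above: the master clock is the input clock
times PLL1 (FREQCR bits 9:8) times PLL2 (fixed by the clock-mode pins),
and the peripheral clock divides back down through pfc_divisors[]. A
worked example with hypothetical input values:

#include <stdio.h>

static const int pll1rate[] = { 8, 12, 16, 0 };
static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };

int main(void)
{
	unsigned long input = 10000000;	/* hypothetical 10 MHz input */
	unsigned int freqcr = 0x0101;	/* hypothetical FREQCR value */
	int pll2 = 1;			/* CONFIG_SH_CLK_MD == 0 */

	unsigned long master = input * pll1rate[(freqcr >> 8) & 0x0003] * pll2;
	unsigned long module = master / pfc_divisors[freqcr & 0x0007];

	printf("master = %lu Hz, module = %lu Hz\n", master, module);
	return 0;	/* 120000000 Hz and 60000000 Hz */
}
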
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
new file mode 100644
index 00000000000..ff99562456f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -0,0 +1,633 @@
+/*
+ * Save/restore floating point context for signal handlers.
+ *
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FIXME! These routines can be optimized in the big endian case.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+/* The PR (precision) bit in the FP Status Register must be clear when
+ * an frchg instruction is executed, otherwise the instruction is undefined.
+ * Executing frchg with PR set causes a trap on some SH4 implementations.
+ */
+
+#define FPSCR_RCHG 0x00000000
+
+
+/*
+ * Save FPU registers onto task structure.
+ * Assume called with FPU enabled (SR.FD=0).
+ */
+void
+save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+{
+ unsigned long dummy;
+
+ clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+ enable_fpu();
+ asm volatile("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "lds %3, fpscr\n\t"
+ : "=r" (dummy)
+ : "0" ((char *)(&tsk->thread.fpu.hard.status)),
+ "r" (FPSCR_RCHG),
+ "r" (FPSCR_INIT)
+ : "memory");
+
+ disable_fpu();
+ release_fpu(regs);
+}
+
+static void
+restore_fpu(struct task_struct *tsk)
+{
+ unsigned long dummy;
+
+ enable_fpu();
+ asm volatile("fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
+ : "=r" (dummy)
+ : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+ : "memory");
+ disable_fpu();
+}
+
+/*
+ * Load the FPU with signalling NaNs. The bit pattern we use has
+ * the property that it represents a signalling NaN whether it is
+ * interpreted as single or as double precision.
+ */
+
+static void
+fpu_init(void)
+{
+ enable_fpu();
+ asm volatile("lds %0, fpul\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
+ "lds %2, fpscr\n\t"
+ : /* no output */
+ : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
+ disable_fpu();
+}
+
+/*
+ * Emulate arithmetic ops on denormalized numbers for some FPU insns.
+ */
+
+/* denormalized float * float */
+static int denormal_mulf(int hx, int hy)
+{
+ unsigned int ix, iy;
+ unsigned long long m, n;
+ int exp, w;
+
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000 || ix == 0)
+ return ((hx ^ hy) & 0x80000000);
+
+ exp = (iy & 0x7f800000) >> 23;
+ ix &= 0x007fffff;
+ iy = (iy & 0x007fffff) | 0x00800000;
+ m = (unsigned long long)ix * iy;
+ n = m;
+ w = -1;
+ while (n) { n >>= 1; w++; }
+
+ /* FIXME: use guard bits */
+ exp += w - 126 - 46;
+ if (exp > 0)
+ ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
+ else if (exp + 22 >= 0)
+ ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
+ else
+ ix = 0;
+
+ ix |= (hx ^ hy) & 0x80000000;
+ return ix;
+}
+
+/* denormalized double * double */
+static void mult64(unsigned long long x, unsigned long long y,
+ unsigned long long *highp, unsigned long long *lowp)
+{
+ unsigned long long sub0, sub1, sub2, sub3;
+ unsigned long long high, low;
+
+ sub0 = (x >> 32) * (unsigned long) (y >> 32);
+ sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
+ sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
+ sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
+ low = sub3;
+ high = 0LL;
+ sub3 += (sub1 << 32);
+ if (low > sub3)
+ high++;
+ low = sub3;
+ sub3 += (sub2 << 32);
+ if (low > sub3)
+ high++;
+ low = sub3;
+ high += (sub1 >> 32) + (sub2 >> 32);
+ high += sub0;
+ *lowp = low;
+ *highp = high;
+}
+
+static inline long long rshift64(unsigned long long mh,
+ unsigned long long ml, int n)
+{
+ if (n >= 64)
+ return mh >> (n - 64);
+ return (mh << (64 - n)) | (ml >> n);
+}
+
+static long long denormal_muld(long long hx, long long hy)
+{
+ unsigned long long ix, iy;
+ unsigned long long mh, ml, nh, nl;
+ int exp, w;
+
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL || ix == 0)
+ return ((hx ^ hy) & 0x8000000000000000LL);
+
+ exp = (iy & 0x7ff0000000000000LL) >> 52;
+ ix &= 0x000fffffffffffffLL;
+ iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ mult64(ix, iy, &mh, &ml);
+ nh = mh;
+ nl = ml;
+ w = -1;
+ if (nh) {
+ while (nh) { nh >>= 1; w++;}
+ w += 64;
+ } else
+ while (nl) { nl >>= 1; w++;}
+
+ /* FIXME: use guard bits */
+ exp += w - 1022 - 52 * 2;
+ if (exp > 0)
+ ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
+ | ((long long)exp << 52);
+ else if (exp + 51 >= 0)
+ ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
+ else
+ ix = 0;
+
+ ix |= (hx ^ hy) & 0x8000000000000000LL;
+ return ix;
+}
+
+/* ix - iy where iy: denormal and ix, iy >= 0 */
+static int denormal_subf1(unsigned int ix, unsigned int iy)
+{
+ int frac;
+ int exp;
+
+ if (ix < 0x00800000)
+ return ix - iy;
+
+ exp = (ix & 0x7f800000) >> 23;
+ if (exp - 1 > 31)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x007fffff) | 0x00800000;
+ frac -= iy;
+ while (frac < 0x00800000) {
+ if (--exp == 0)
+ return frac;
+ frac <<= 1;
+ }
+
+ return (exp << 23) | (frac & 0x007fffff);
+}
+
+/* ix + iy where iy: denormal and ix, iy >= 0 */
+static int denormal_addf1(unsigned int ix, unsigned int iy)
+{
+ int frac;
+ int exp;
+
+ if (ix < 0x00800000)
+ return ix + iy;
+
+ exp = (ix & 0x7f800000) >> 23;
+ if (exp - 1 > 31)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x007fffff) | 0x00800000;
+ frac += iy;
+ if (frac >= 0x01000000) {
+ frac >>= 1;
+ ++exp;
+ }
+
+ return (exp << 23) | (frac & 0x007fffff);
+}
+
+static int denormal_addf(int hx, int hy)
+{
+ unsigned int ix, iy;
+ int sign;
+
+ if ((hx ^ hy) & 0x80000000) {
+ sign = hx & 0x80000000;
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000) {
+ ix = denormal_subf1(ix, iy);
+ if (ix < 0) {
+ ix = -ix;
+ sign ^= 0x80000000;
+ }
+ } else {
+ ix = denormal_subf1(iy, ix);
+ sign ^= 0x80000000;
+ }
+ } else {
+ sign = hx & 0x80000000;
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000)
+ ix = denormal_addf1(ix, iy);
+ else
+ ix = denormal_addf1(iy, ix);
+ }
+
+ return sign | ix;
+}
+
+/* ix - iy where iy: denormal and ix, iy >= 0 */
+static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
+{
+ long long frac;
+ int exp;
+
+ if (ix < 0x0010000000000000LL)
+ return ix - iy;
+
+ exp = (ix & 0x7ff0000000000000LL) >> 52;
+ if (exp - 1 > 63)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ frac -= iy;
+ while (frac < 0x0010000000000000LL) {
+ if (--exp == 0)
+ return frac;
+ frac <<= 1;
+ }
+
+ return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
+}
+
+/* ix + iy where iy: denormal and ix, iy >= 0 */
+static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
+{
+ long long frac;
+ long long exp;
+
+ if (ix < 0x0010000000000000LL)
+ return ix + iy;
+
+ exp = (ix & 0x7ff0000000000000LL) >> 52;
+ if (exp - 1 > 63)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ frac += iy;
+ if (frac >= 0x0020000000000000LL) {
+ frac >>= 1;
+ ++exp;
+ }
+
+ return (exp << 52) | (frac & 0x000fffffffffffffLL);
+}
+
+static long long denormal_addd(long long hx, long long hy)
+{
+ unsigned long long ix, iy;
+ long long sign;
+
+ if ((hx ^ hy) & 0x8000000000000000LL) {
+ sign = hx & 0x8000000000000000LL;
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL) {
+ ix = denormal_subd1(ix, iy);
+ if (ix < 0) {
+ ix = -ix;
+ sign ^= 0x8000000000000000LL;
+ }
+ } else {
+ ix = denormal_subd1(iy, ix);
+ sign ^= 0x8000000000000000LL;
+ }
+ } else {
+ sign = hx & 0x8000000000000000LL;
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL)
+ ix = denormal_addd1(ix, iy);
+ else
+ ix = denormal_addd1(iy, ix);
+ }
+
+ return sign | ix;
+}
+
+/**
+ * denormal_to_double - Given a denormalized float number,
+ *                      store it in double float format
+ *
+ * @fpu: Pointer to sh_fpu_hard structure
+ * @n: Index to FP register
+ */
+static void
+denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
+{
+ unsigned long du, dl;
+ unsigned long x = fpu->fpul;
+ int exp = 1023 - 126;
+
+ if (x != 0 && (x & 0x7f800000) == 0) {
+ du = (x & 0x80000000);
+ while ((x & 0x00800000) == 0) {
+ x <<= 1;
+ exp--;
+ }
+ x &= 0x007fffff;
+ du |= (exp << 20) | (x >> 3);
+ dl = x << 29;
+
+ fpu->fp_regs[n] = du;
+ fpu->fp_regs[n+1] = dl;
+ }
+}
+
+/**
+ * ieee_fpe_handler - Handle denormalized number exception
+ *
+ * @regs: Pointer to register structure
+ *
+ * Returns 1 when it's handled (should not cause exception).
+ */
+static int
+ieee_fpe_handler (struct pt_regs *regs)
+{
+ unsigned short insn = *(unsigned short *) regs->pc;
+ unsigned short finsn;
+ unsigned long nextpc;
+ int nib[4] = {
+ (insn >> 12) & 0xf,
+ (insn >> 8) & 0xf,
+ (insn >> 4) & 0xf,
+ insn & 0xf};
+
+ if (nib[0] == 0xb ||
+ (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
+ regs->pr = regs->pc + 4;
+ if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
+ nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ else
+ nextpc = regs->pc + 4;
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4;
+ else
+ nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x4 && nib[3] == 0xb &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
+ nextpc = regs->regs[nib[1]];
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
+ nextpc = regs->pc + 4 + regs->regs[nib[1]];
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (insn == 0x000b) { /* rts */
+ nextpc = regs->pr;
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else {
+ nextpc = regs->pc + 2;
+ finsn = insn;
+ }
+
+#define FPSCR_FPU_ERROR (1 << 17)
+
+ if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
+ struct task_struct *tsk = current;
+
+ if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+ /* FPU error */
+ denormal_to_double (&tsk->thread.fpu.hard,
+ (finsn >> 8) & 0xf);
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & (1 << 19);
+
+ if ((fpscr & FPSCR_FPU_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal */
+ llx = ((long long) hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n+1];
+ lly = ((long long) hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m+1];
+ if ((hx & 0x7fffffff) >= 0x00100000)
+ llx = denormal_muld(lly, llx);
+ else
+ llx = denormal_muld(llx, lly);
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_FPU_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal */
+ if ((hx & 0x7fffffff) >= 0x00800000)
+ hx = denormal_mulf(hy, hx);
+ else
+ hx = denormal_mulf(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & (1 << 19);
+
+ if ((fpscr & FPSCR_FPU_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal */
+ llx = ((long long) hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n+1];
+ lly = ((long long) hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m+1];
+ if ((finsn & 0xf00f) == 0xf000)
+ llx = denormal_addd(llx, lly);
+ else
+ llx = denormal_addd(llx, lly ^ (1LL << 63));
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_FPU_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal */
+ if ((finsn & 0xf00f) == 0xf000)
+ hx = denormal_addf(hx, hy);
+ else
+ hx = denormal_addf(hx, hy ^ 0x80000000);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ }
+
+ return 0;
+}
+
+BUILD_TRAP_HANDLER(fpu_error)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
+
+ save_fpu(tsk, regs);
+ if (ieee_fpe_handler(regs)) {
+ tsk->thread.fpu.hard.fpscr &=
+ ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
+ grab_fpu(regs);
+ restore_fpu(tsk);
+ set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ return;
+ }
+
+ force_sig(SIGFPE, tsk);
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
+
+ grab_fpu(regs);
+ if (!user_mode(regs)) {
+ printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ return;
+ }
+
+ if (used_math()) {
+ /* Using the FPU again. */
+ restore_fpu(tsk);
+ } else {
+ /* First time FPU user. */
+ fpu_init();
+ set_used_math();
+ }
+ set_tsk_thread_flag(tsk, TIF_USEDFPU);
+}
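
To see what denormal_mulf() is doing, here is a trace of its core with
hypothetical operands (one denormal, one normal): the significands are
multiplied as 64-bit integers, the top set bit w is found by shifting,
and the exponent is rebiased before repacking.

#include <stdio.h>

int main(void)
{
	unsigned int hx = 0x00400000;	/* denormal: 0.5 * 2^-126 */
	unsigned int hy = 0x40000000;	/* normal: 2.0 */
	unsigned int ix = hx & 0x007fffff;			/* 0x400000 */
	unsigned int iy = (hy & 0x007fffff) | 0x00800000;	/* implicit 1 */
	int exp = (hy & 0x7f800000) >> 23;			/* 128 */
	unsigned long long m = (unsigned long long)ix * iy, n = m;
	int w = -1;
	unsigned int res;

	while (n) { n >>= 1; w++; }	/* top set bit: w = 45 */
	exp += w - 126 - 46;		/* 128 + 45 - 172 = 1 */

	/* exp > 0, so the product is (just barely) normal again */
	res = ((unsigned int)(m >> (w - 23)) & 0x007fffff) | (exp << 23);
	printf("result = 0x%08x\n", res);	/* 0x00800000 == 2^-126 */
	return 0;
}
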
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 6d02465704b..6910e266446 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -3,25 +3,36 @@
*
* CPU Subtype Probing for SH-2A.
*
- * Copyright (C) 2004, 2005 Paul Mundt
+ * Copyright (C) 2004 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
int __init detect_cpu_and_cache_system(void)
{
- /* Just SH7206 for now .. */
- boot_cpu_data.type = CPU_SH7206;
+ /* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
boot_cpu_data.flags |= CPU_HAS_OP32;
+#if defined(CONFIG_CPU_SUBTYPE_SH7203)
+ boot_cpu_data.type = CPU_SH7203;
+ /* SH7203 has an FPU.. */
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
+ boot_cpu_data.type = CPU_SH7263;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+ boot_cpu_data.type = CPU_SH7206;
+ /* While SH7206 has a DSP.. */
+ boot_cpu_data.flags |= CPU_HAS_DSP;
+#endif
+
boot_cpu_data.dcache.ways = 4;
- boot_cpu_data.dcache.way_incr = (1 << 11);
+ boot_cpu_data.dcache.way_incr = (1 << 11);
boot_cpu_data.dcache.sets = 128;
boot_cpu_data.dcache.entry_shift = 4;
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
@@ -37,4 +48,3 @@ int __init detect_cpu_and_cache_system(void)
return 0;
}
-
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
new file mode 100644
index 00000000000..db6ef5cecde
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -0,0 +1,319 @@
+/*
+ * SH7203 and SH7263 Setup
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+ DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI,
+ DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI,
+ DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI,
+ DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI,
+ USB, LCDC, CMT0, CMT1, BSC, WDT,
+ MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D,
+ MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F,
+ MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U,
+ MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U,
+ MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V,
+ MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V,
+ ADC_ADI,
+ IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI,
+ IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI,
+ IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI,
+ IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, IIC33_TEI,
+ SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
+ SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
+ SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
+ SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
+ SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI,
+ SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI,
+ SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
+
+ /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
+ ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF,
+ ROMDEC_IREADY,
+
+ FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+
+ SDHI3, SDHI0, SDHI1,
+
+ RTC_ARM, RTC_PRD, RTC_CUP,
+ RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, RCAN0_SLE,
+ RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, RCAN1_SLE,
+
+ SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI,
+
+ /* interrupt groups */
+ PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU4_ABCD,
+ IIC30, IIC31, IIC32, IIC33, SCIF0, SCIF1, SCIF2, SCIF3,
+ SSU0, SSU1, ROMDEC, SDHI, FLCTL, RTC, RCAN0, RCAN1, SRC
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+ INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109),
+ INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113),
+ INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117),
+ INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121),
+ INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125),
+ INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129),
+ INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133),
+ INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137),
+ INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
+ INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
+ INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
+ INTC_IRQ(MTU2_TGI0A, 146), INTC_IRQ(MTU2_TGI0B, 147),
+ INTC_IRQ(MTU2_TGI0C, 148), INTC_IRQ(MTU2_TGI0D, 149),
+ INTC_IRQ(MTU2_TCI0V, 150),
+ INTC_IRQ(MTU2_TGI0E, 151), INTC_IRQ(MTU2_TGI0F, 152),
+ INTC_IRQ(MTU2_TGI1A, 153), INTC_IRQ(MTU2_TGI1B, 154),
+ INTC_IRQ(MTU2_TCI1V, 155), INTC_IRQ(MTU2_TCI1U, 156),
+ INTC_IRQ(MTU2_TGI2A, 157), INTC_IRQ(MTU2_TGI2B, 158),
+ INTC_IRQ(MTU2_TCI2V, 159), INTC_IRQ(MTU2_TCI2U, 160),
+ INTC_IRQ(MTU2_TGI3A, 161), INTC_IRQ(MTU2_TGI3B, 162),
+ INTC_IRQ(MTU2_TGI3C, 163), INTC_IRQ(MTU2_TGI3D, 164),
+ INTC_IRQ(MTU2_TCI3V, 165),
+ INTC_IRQ(MTU2_TGI4A, 166), INTC_IRQ(MTU2_TGI4B, 167),
+ INTC_IRQ(MTU2_TGI4C, 168), INTC_IRQ(MTU2_TGI4D, 169),
+ INTC_IRQ(MTU2_TCI4V, 170),
+ INTC_IRQ(ADC_ADI, 171),
+ INTC_IRQ(IIC30_STPI, 172), INTC_IRQ(IIC30_NAKI, 173),
+ INTC_IRQ(IIC30_RXI, 174), INTC_IRQ(IIC30_TXI, 175),
+ INTC_IRQ(IIC30_TEI, 176),
+ INTC_IRQ(IIC31_STPI, 177), INTC_IRQ(IIC31_NAKI, 178),
+ INTC_IRQ(IIC31_RXI, 179), INTC_IRQ(IIC31_TXI, 180),
+ INTC_IRQ(IIC31_TEI, 181),
+ INTC_IRQ(IIC32_STPI, 182), INTC_IRQ(IIC32_NAKI, 183),
+ INTC_IRQ(IIC32_RXI, 184), INTC_IRQ(IIC32_TXI, 185),
+ INTC_IRQ(IIC32_TEI, 186),
+ INTC_IRQ(IIC33_STPI, 187), INTC_IRQ(IIC33_NAKI, 188),
+ INTC_IRQ(IIC33_RXI, 189), INTC_IRQ(IIC33_TXI, 190),
+ INTC_IRQ(IIC33_TEI, 191),
+ INTC_IRQ(SCIF0_BRI, 192), INTC_IRQ(SCIF0_ERI, 193),
+ INTC_IRQ(SCIF0_RXI, 194), INTC_IRQ(SCIF0_TXI, 195),
+ INTC_IRQ(SCIF1_BRI, 196), INTC_IRQ(SCIF1_ERI, 197),
+ INTC_IRQ(SCIF1_RXI, 198), INTC_IRQ(SCIF1_TXI, 199),
+ INTC_IRQ(SCIF2_BRI, 200), INTC_IRQ(SCIF2_ERI, 201),
+ INTC_IRQ(SCIF2_RXI, 202), INTC_IRQ(SCIF2_TXI, 203),
+ INTC_IRQ(SCIF3_BRI, 204), INTC_IRQ(SCIF3_ERI, 205),
+ INTC_IRQ(SCIF3_RXI, 206), INTC_IRQ(SCIF3_TXI, 207),
+ INTC_IRQ(SSU0_SSERI, 208), INTC_IRQ(SSU0_SSRXI, 209),
+ INTC_IRQ(SSU0_SSTXI, 210),
+ INTC_IRQ(SSU1_SSERI, 211), INTC_IRQ(SSU1_SSRXI, 212),
+ INTC_IRQ(SSU1_SSTXI, 213),
+ INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
+ INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
+ INTC_IRQ(FLCTL_FLSTEI, 224), INTC_IRQ(FLCTL_FLTENDI, 225),
+ INTC_IRQ(FLCTL_FLTREQ0I, 226), INTC_IRQ(FLCTL_FLTREQ1I, 227),
+ INTC_IRQ(RTC_ARM, 231), INTC_IRQ(RTC_PRD, 232),
+ INTC_IRQ(RTC_CUP, 233),
+ INTC_IRQ(RCAN0_ERS, 234), INTC_IRQ(RCAN0_OVR, 235),
+ INTC_IRQ(RCAN0_RM0, 236), INTC_IRQ(RCAN0_RM1, 237),
+ INTC_IRQ(RCAN0_SLE, 238),
+ INTC_IRQ(RCAN1_ERS, 239), INTC_IRQ(RCAN1_OVR, 240),
+ INTC_IRQ(RCAN1_RM0, 241), INTC_IRQ(RCAN1_RM1, 242),
+ INTC_IRQ(RCAN1_SLE, 243),
+
+ /* SH7263-specific trash */
+#ifdef CONFIG_CPU_SUBTYPE_SH7263
+ INTC_IRQ(ROMDEC_ISY, 218), INTC_IRQ(ROMDEC_IERR, 219),
+ INTC_IRQ(ROMDEC_IARG, 220), INTC_IRQ(ROMDEC_ISEC, 221),
+ INTC_IRQ(ROMDEC_IBUF, 222), INTC_IRQ(ROMDEC_IREADY, 223),
+
+ INTC_IRQ(SDHI3, 228), INTC_IRQ(SDHI0, 229), INTC_IRQ(SDHI1, 230),
+
+ INTC_IRQ(SRC_OVF, 244), INTC_IRQ(SRC_ODFI, 245),
+ INTC_IRQ(SRC_IDEI, 246),
+
+ INTC_IRQ(IEBI, 247),
+#endif
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+ INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI),
+ INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI),
+ INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI),
+ INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI),
+ INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI),
+ INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI),
+ INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI),
+ INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI),
+ INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D),
+ INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F),
+ INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B),
+ INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U),
+ INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B),
+ INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U),
+ INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D),
+ INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D),
+ INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI,
+ IIC30_TEI),
+ INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI,
+ IIC31_TEI),
+ INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI,
+ IIC32_TEI),
+ INTC_GROUP(IIC33, IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI,
+ IIC33_TEI),
+ INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
+ INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
+ INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
+ INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
+ INTC_GROUP(SSU0, SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI),
+ INTC_GROUP(SSU1, SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI),
+ INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I,
+ FLCTL_FLTREQ1I),
+ INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP),
+ INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1,
+ RCAN0_SLE),
+ INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1,
+ RCAN1_SLE),
+
+#ifdef CONFIG_CPU_SUBTYPE_SH7263
+ INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG,
+ ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY),
+ INTC_GROUP(SDHI, SDHI3, SDHI0, SDHI1),
+ INTC_GROUP(SRC, SRC_OVF, SRC_ODFI, SRC_IDEI),
+#endif
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
+ { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
+ { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
+ { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
+ { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
+ { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
+ MTU2_VU } },
+ { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
+ MTU2_TCI4V } },
+ { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
+ { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
+ { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
+#ifdef CONFIG_CPU_SUBTYPE_SH7203
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
+ SSI3_SSII, 0 } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
+#else
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
+ SSI3_SSII, ROMDEC } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
+#endif
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe0808, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 193, 194, 195, 192 },
+ }, {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 197, 198, 199, 196 },
+ }, {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 201, 202, 203, 200 },
+ }, {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 205, 206, 207, 204 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffff2000,
+ .end = 0xffff2000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 232,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 233,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 231,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct platform_device *sh7203_devices[] __initdata = {
+ &sci_device,
+ &rtc_device,
+};
+
+static int __init sh7203_devices_setup(void)
+{
+ return platform_add_devices(sh7203_devices,
+ ARRAY_SIZE(sh7203_devices));
+}
+__initcall(sh7203_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index bd745aa8722..a564425b905 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -167,7 +167,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
- NULL, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 646eb693361..3ae4d9111f1 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH3) := clock-sh3.o
@@ -21,5 +22,6 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7706) := clock-sh7706.o
clock-$(CONFIG_CPU_SUBTYPE_SH7709) := clock-sh7709.o
clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o
clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o
obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
new file mode 100644
index 00000000000..54f54df51ef
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -0,0 +1,71 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7712.c
+ *
+ * SH7712 support for the clock framework
+ *
+ * Copyright (C) 2007 Andrew Murray <amurray@mpc-data.co.uk>
+ *
+ * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int multipliers[] = { 1, 2, 3 };
+static int divisors[] = { 1, 2, 3, 4, 6 };
+
+static void master_clk_init(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = (frqcr & 0x0300) >> 8;
+
+ clk->rate *= multipliers[idx];
+}
+
+static struct clk_ops sh7712_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = frqcr & 0x0007;
+
+ clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops sh7712_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = (frqcr & 0x0030) >> 4;
+
+ clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops sh7712_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7712_clk_ops[] = {
+ &sh7712_master_clk_ops,
+ &sh7712_module_clk_ops,
+ &sh7712_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7712_clk_ops))
+ *ops = sh7712_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 0d12a124055..4004073f98c 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -13,8 +13,9 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/page.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -409,6 +410,27 @@ ENTRY(handle_exception)
! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
! save all registers onto stack.
!
+
+#ifdef CONFIG_GUSA
+ ! Check for roll back gRB (User and Kernel)
+ mov r15, k0
+ shll k0
+ bf/s 1f
+ shll k0
+ bf/s 1f
+ stc spc, k1
+ stc r0_bank, k0
+ cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0)
+ bt/s 2f
+ stc r1_bank, k1
+
+ add #-2, k0
+ add r15, k0
+ ldc k0, spc ! PC = saved r0 + r15 - 2
+2: mov k1, r15 ! SP = r1
+1:
+#endif
+
stc ssr, k0 ! Is it from kernel space?
shll k0 ! Check MD bit (bit30) by shifting it into...
shll k0 ! ...the T bit
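
In pseudo-C, the gUSA rollback added above reads as follows (register
accesses are shown through a hypothetical regs structure; the real code
works on the banked registers before pt_regs exists). User code flags a
gUSA atomic sequence by loading a small negative value into r15, with
r0 holding the end address of the region and r1 the saved stack pointer:

unsigned long r15 = regs->regs[15];

if ((r15 & 0xc0000000) == 0xc0000000) {	/* both top bits set: gUSA */
	unsigned long end = regs->regs[0];	/* region end address */

	if (regs->pc < end)		/* interrupted mid-sequence? */
		regs->pc = end + r15 - 2;	/* restart at region start */
	regs->regs[15] = regs->regs[1];		/* restore the real SP */
}
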
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index b6abf38d3a8..11b6d9c6eda 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -36,7 +36,7 @@ ENTRY(exception_handling_table)
.long exception_error ! address error store /* 100 */
#endif
#if defined(CONFIG_SH_FPU)
- .long do_fpu_error /* 120 */
+ .long fpu_error_trap_handler /* 120 */
#else
.long exception_error /* 120 */
#endif
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf579e061e0..fcc80bb7bee 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,11 +16,11 @@
#include <asm/cache.h>
#include <asm/io.h>
-int __init detect_cpu_and_cache_system(void)
+int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
- jump_to_P2();
+ jump_to_uncached();
/*
* Check if the entry shadows or not.
* When shadowed, it's 128-entry system.
@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
ctrl_outl(data0&~SH_CACHE_VALID, addr0);
ctrl_outl(data2&~SH_CACHE_VALID, addr1);
- back_to_P1();
+ back_to_cached();
boot_cpu_data.dcache.ways = 4;
boot_cpu_data.dcache.entry_shift = 4;
@@ -84,6 +84,9 @@ int __init detect_cpu_and_cache_system(void)
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
boot_cpu_data.type = CPU_SH7720;
#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7721)
+ boot_cpu_data.type = CPU_SH7721;
+#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
boot_cpu_data.type = CPU_SH7705;
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index f6c65f2659e..dd0a20a685f 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -66,12 +66,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(DMAC, 7),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF0, 3),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
@@ -85,7 +79,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
@@ -93,7 +87,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 60b04b1f945..969804bb523 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -81,13 +81,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(DMAC, 7),
- INTC_PRIO(SCI, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF0, 3),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
@@ -109,7 +102,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
@@ -120,7 +113,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#endif
static struct resource rtc_resources[] = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 84e5629fa84..0cc0e2bf135 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -73,18 +73,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(DMAC1, 7),
- INTC_PRIO(DMAC2, 7),
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SIOF0, 3),
- INTC_PRIO(SIOF1, 3),
- INTC_PRIO(EDMAC0, 5),
- INTC_PRIO(EDMAC1, 5),
- INTC_PRIO(EDMAC2, 5),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
@@ -101,7 +89,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
@@ -109,7 +97,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct resource rtc_resources[] = {
[0] = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index a0929b8a95a..3855ea4c21c 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -85,9 +85,62 @@ static struct platform_device sci_device = {
},
};
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xA4428000,
+ .end = 0xA44280FF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 67,
+ .end = 67,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 usb_ohci_dma_mask = 0xffffffffUL;
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct resource usbf_resources[] = {
+ [0] = {
+ .name = "sh_udc",
+ .start = 0xA4420000,
+ .end = 0xA44200FF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "sh_udc",
+ .start = 65,
+ .end = 65,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbf_device = {
+ .name = "sh_udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usbf_resources),
+ .resource = usbf_resources,
+};
+
static struct platform_device *sh7720_devices[] __initdata = {
&rtc_device,
&sci_device,
+ &usb_ohci_device,
+ &usbf_device,
};
static int __init sh7720_devices_setup(void)
@@ -127,8 +180,11 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1_DEI0, 0x800),
INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840),
INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900),
- INTC_VECT(SSL, 0x980), INTC_VECT(USBFI0, 0xa20),
- INTC_VECT(USBFI1, 0xa40), INTC_VECT(USBHI, 0xa60),
+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+ INTC_VECT(SSL, 0x980),
+#endif
+ INTC_VECT(USBFI0, 0xa20), INTC_VECT(USBFI1, 0xa40),
+ INTC_VECT(USBHI, 0xa60),
INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0),
INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00),
INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80),
@@ -153,22 +209,16 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 2),
- INTC_PRIO(SCIF1, 2),
- INTC_PRIO(DMAC1, 1),
- INTC_PRIO(DMAC2, 1),
- INTC_PRIO(RTC, 2),
- INTC_PRIO(TMU, 2),
- INTC_PRIO(TPU, 2),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
{ 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
+#else
+ { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, 0 } },
+#endif
{ 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
{ 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
{ 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
@@ -177,7 +227,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
static struct intc_sense_reg sense_registers[] __initdata = {
{ INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
@@ -190,7 +240,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq,
- NULL, priorities, NULL, prio_registers, sense_registers);
+ NULL, NULL, prio_registers, sense_registers);
void __init plat_irq_setup_pins(int mode)
{
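
Besides the SH7721 conditionals, the sh7720 hunk registers the on-chip OHCI host and UDC function blocks as plain platform devices, so binding happens purely by name. A rough sketch of the consuming side, assuming a driver registered with platform_driver_register() whose name matches the "sh_ohci" device (the probe body is a placeholder):

    #include <linux/platform_device.h>

    static int sh_ohci_probe(struct platform_device *pdev)
    {
            /* The MMIO window and IRQ come from the resource table above. */
            struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

            if (!mem || !irq)
                    return -ENODEV;
            /* ... ioremap mem->start..mem->end and request irq->start ... */
            return 0;
    }

    static struct platform_driver sh_ohci_driver = {
            .probe  = sh_ohci_probe,
            .driver = {
                    .name = "sh_ohci",      /* must match the device name */
            },
    };
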
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index dadd6bffc12..d608557c7a3 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -5,7 +5,7 @@
obj-y := probe.o common.o
common-y += $(addprefix ../sh3/, entry.o ex.o)
-obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o
obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
# CPU subtype setup
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index c5a4fc77fa0..817f9939cda 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -1,7 +1,4 @@
-/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
- *
- * linux/arch/sh/kernel/fpu.c
- *
+/*
* Save/restore floating point context for signal handlers.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,15 +6,16 @@
* for more details.
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support)
*
- * FIXME! These routines can be optimized in big endian case.
+ * FIXME! These routines have not been tested for the big endian case.
*/
-
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/io.h>
+#include <asm/cpu/fpu.h>
#include <asm/processor.h>
#include <asm/system.h>
-#include <asm/io.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
@@ -25,177 +23,184 @@
*/
#define FPSCR_RCHG 0x00000000
+extern unsigned long long float64_div(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_mul(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_add(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_sub(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
+static unsigned int fpu_exception_flags;
/*
* Save FPU registers onto task structure.
* Assume called with FPU enabled (SR.FD=0).
*/
-void
-save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
unsigned long dummy;
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu();
- asm volatile("sts.l fpul, @-%0\n\t"
- "sts.l fpscr, @-%0\n\t"
- "lds %2, fpscr\n\t"
- "frchg\n\t"
- "fmov.s fr15, @-%0\n\t"
- "fmov.s fr14, @-%0\n\t"
- "fmov.s fr13, @-%0\n\t"
- "fmov.s fr12, @-%0\n\t"
- "fmov.s fr11, @-%0\n\t"
- "fmov.s fr10, @-%0\n\t"
- "fmov.s fr9, @-%0\n\t"
- "fmov.s fr8, @-%0\n\t"
- "fmov.s fr7, @-%0\n\t"
- "fmov.s fr6, @-%0\n\t"
- "fmov.s fr5, @-%0\n\t"
- "fmov.s fr4, @-%0\n\t"
- "fmov.s fr3, @-%0\n\t"
- "fmov.s fr2, @-%0\n\t"
- "fmov.s fr1, @-%0\n\t"
- "fmov.s fr0, @-%0\n\t"
- "frchg\n\t"
- "fmov.s fr15, @-%0\n\t"
- "fmov.s fr14, @-%0\n\t"
- "fmov.s fr13, @-%0\n\t"
- "fmov.s fr12, @-%0\n\t"
- "fmov.s fr11, @-%0\n\t"
- "fmov.s fr10, @-%0\n\t"
- "fmov.s fr9, @-%0\n\t"
- "fmov.s fr8, @-%0\n\t"
- "fmov.s fr7, @-%0\n\t"
- "fmov.s fr6, @-%0\n\t"
- "fmov.s fr5, @-%0\n\t"
- "fmov.s fr4, @-%0\n\t"
- "fmov.s fr3, @-%0\n\t"
- "fmov.s fr2, @-%0\n\t"
- "fmov.s fr1, @-%0\n\t"
- "fmov.s fr0, @-%0\n\t"
- "lds %3, fpscr\n\t"
- : "=r" (dummy)
- : "0" ((char *)(&tsk->thread.fpu.hard.status)),
- "r" (FPSCR_RCHG),
- "r" (FPSCR_INIT)
- : "memory");
-
- disable_fpu();
- release_fpu(regs);
+ asm volatile ("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "lds %2, fpscr\n\t"
+ "frchg\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "frchg\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "lds %3, fpscr\n\t":"=r" (dummy)
+ :"0"((char *)(&tsk->thread.fpu.hard.status)),
+ "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
+ :"memory");
+
+ disable_fpu();
+ release_fpu(regs);
}
-static void
-restore_fpu(struct task_struct *tsk)
+static void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
- enable_fpu();
- asm volatile("lds %2, fpscr\n\t"
- "fmov.s @%0+, fr0\n\t"
- "fmov.s @%0+, fr1\n\t"
- "fmov.s @%0+, fr2\n\t"
- "fmov.s @%0+, fr3\n\t"
- "fmov.s @%0+, fr4\n\t"
- "fmov.s @%0+, fr5\n\t"
- "fmov.s @%0+, fr6\n\t"
- "fmov.s @%0+, fr7\n\t"
- "fmov.s @%0+, fr8\n\t"
- "fmov.s @%0+, fr9\n\t"
- "fmov.s @%0+, fr10\n\t"
- "fmov.s @%0+, fr11\n\t"
- "fmov.s @%0+, fr12\n\t"
- "fmov.s @%0+, fr13\n\t"
- "fmov.s @%0+, fr14\n\t"
- "fmov.s @%0+, fr15\n\t"
- "frchg\n\t"
- "fmov.s @%0+, fr0\n\t"
- "fmov.s @%0+, fr1\n\t"
- "fmov.s @%0+, fr2\n\t"
- "fmov.s @%0+, fr3\n\t"
- "fmov.s @%0+, fr4\n\t"
- "fmov.s @%0+, fr5\n\t"
- "fmov.s @%0+, fr6\n\t"
- "fmov.s @%0+, fr7\n\t"
- "fmov.s @%0+, fr8\n\t"
- "fmov.s @%0+, fr9\n\t"
- "fmov.s @%0+, fr10\n\t"
- "fmov.s @%0+, fr11\n\t"
- "fmov.s @%0+, fr12\n\t"
- "fmov.s @%0+, fr13\n\t"
- "fmov.s @%0+, fr14\n\t"
- "fmov.s @%0+, fr15\n\t"
- "frchg\n\t"
- "lds.l @%0+, fpscr\n\t"
- "lds.l @%0+, fpul\n\t"
- : "=r" (dummy)
- : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
- : "memory");
+ enable_fpu();
+ asm volatile ("lds %2, fpscr\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "frchg\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "frchg\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
+ :"=r" (dummy)
+ :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
+ :"memory");
disable_fpu();
}
/*
* Load the FPU with signalling NANS. This bit pattern we're using
 * has the property that no matter whether considered as single or as
- * double precision represents signaling NANS.
+ * double precision represents signaling NANS.
*/
-static void
-fpu_init(void)
+static void fpu_init(void)
{
enable_fpu();
- asm volatile("lds %0, fpul\n\t"
- "lds %1, fpscr\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "lds %2, fpscr\n\t"
- : /* no output */
- : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
- disable_fpu();
+ asm volatile ( "lds %0, fpul\n\t"
+ "lds %1, fpscr\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
+ "frchg\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
+ "frchg\n\t"
+ "lds %2, fpscr\n\t"
+ : /* no output */
+ :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
+ disable_fpu();
}
/**
- * denormal_to_double - Given denormalized float number,
- * store double float
+ * denormal_to_double - Given denormalized float number,
+ * store double float
*
- * @fpu: Pointer to sh_fpu_hard structure
- * @n: Index to FP register
+ * @fpu: Pointer to sh_fpu_hard structure
+ * @n: Index to FP register
*/
-static void
-denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
+static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
@@ -212,7 +217,7 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
dl = x << 29;
fpu->fp_regs[n] = du;
- fpu->fp_regs[n+1] = dl;
+ fpu->fp_regs[n + 1] = dl;
}
}
@@ -223,68 +228,191 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
*
* Returns 1 when it's handled (should not cause exception).
*/
-static int
-ieee_fpe_handler (struct pt_regs *regs)
+static int ieee_fpe_handler(struct pt_regs *regs)
{
- unsigned short insn = *(unsigned short *) regs->pc;
+ unsigned short insn = *(unsigned short *)regs->pc;
unsigned short finsn;
unsigned long nextpc;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
- insn & 0xf};
-
- if (nib[0] == 0xb ||
- (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
- regs->pr = regs->pc + 4;
-
- if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
- nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
- finsn = *(unsigned short *) (regs->pc + 2);
- } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
+ insn & 0xf
+ };
+
+ if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
+ regs->pr = regs->pc + 4; /* bsr & jsr */
+
+ if (nib[0] == 0xa || nib[0] == 0xb) {
+ /* bra & bsr */
+ nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xd) {
+ /* bt/s */
if (regs->sr & 1)
- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
- finsn = *(unsigned short *) (regs->pc + 2);
- } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xf) {
+ /* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
- finsn = *(unsigned short *) (regs->pc + 2);
+ nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
+ finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
- (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
+ (nib[2] == 0x0 || nib[2] == 0x2)) {
+ /* jmp & jsr */
nextpc = regs->regs[nib[1]];
- finsn = *(unsigned short *) (regs->pc + 2);
+ finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
- (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
+ (nib[2] == 0x0 || nib[2] == 0x2)) {
+ /* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
- finsn = *(unsigned short *) (regs->pc + 2);
- } else if (insn == 0x000b) { /* rts */
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (insn == 0x000b) {
+ /* rts */
nextpc = regs->pr;
- finsn = *(unsigned short *) (regs->pc + 2);
+ finsn = *(unsigned short *)(regs->pc + 2);
} else {
nextpc = regs->pc + instruction_size(insn);
finsn = insn;
}
- if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
+ if ((finsn & 0xf1ff) == 0xf0ad) {
+ /* fcnvsd */
struct task_struct *tsk = current;
save_fpu(tsk, regs);
- if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
+ if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
/* FPU error */
- denormal_to_double (&tsk->thread.fpu.hard,
- (finsn >> 8) & 0xf);
- tsk->thread.fpu.hard.fpscr &=
- ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
- grab_fpu(regs);
- restore_fpu(tsk);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ denormal_to_double(&tsk->thread.fpu.hard,
+ (finsn >> 8) & 0xf);
+ else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00f) == 0xf002) {
+ /* fmul */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m + 1];
+ llx = float64_mul(llx, lly);
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ hx = float32_mul(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00e) == 0xf000) {
+ /* fadd, fsub */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m + 1];
+ if ((finsn & 0xf00f) == 0xf000)
+ llx = float64_add(llx, lly);
+ else
+ llx = float64_sub(llx, lly);
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ if ((finsn & 0xf00f) == 0xf000)
+ hx = float32_add(hx, hy);
+ else
+ hx = float32_sub(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf003) == 0xf003) {
+ /* fdiv */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.fpu.hard.fp_regs[n];
+ hy = tsk->thread.fpu.hard.fp_regs[m];
+ fpscr = tsk->thread.fpu.hard.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.fpu.hard.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.fpu.hard.fp_regs[m + 1];
+
+ llx = float64_div(llx, lly);
+
+ tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
+ tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ hx = float32_div(hx, hy);
+ tsk->thread.fpu.hard.fp_regs[n] = hx;
} else
- force_sig(SIGFPE, tsk);
+ return 0;
regs->pc = nextpc;
return 1;
@@ -293,27 +421,48 @@ ieee_fpe_handler (struct pt_regs *regs)
return 0;
}
-asmlinkage void
-do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
- unsigned long r7, struct pt_regs __regs)
+void float_raise(unsigned int flags)
+{
+ fpu_exception_flags |= flags;
+}
+
+int float_rounding_mode(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
struct task_struct *tsk = current;
+ int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
+ return roundingMode;
+}
- if (ieee_fpe_handler(regs))
- return;
+BUILD_TRAP_HANDLER(fpu_error)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
- regs->pc += 2;
save_fpu(tsk, regs);
+ fpu_exception_flags = 0;
+ if (ieee_fpe_handler(regs)) {
+ tsk->thread.fpu.hard.fpscr &=
+ ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
+ tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
+ /* Set the FPSCR flag as well as cause bits - simply
+ * replicate the cause */
+ tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
+ grab_fpu(regs);
+ restore_fpu(tsk);
+ set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+ (fpu_exception_flags >> 2)) == 0) {
+ return;
+ }
+ }
+
force_sig(SIGFPE, tsk);
}
-asmlinkage void
-do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
- unsigned long r7, struct pt_regs __regs)
+BUILD_TRAP_HANDLER(fpu_state_restore)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
grab_fpu(regs);
if (!user_mode(regs)) {
@@ -324,7 +473,7 @@ do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
if (used_math()) {
/* Using the FPU again. */
restore_fpu(tsk);
- } else {
+ } else {
/* First time FPU user. */
fpu_init();
set_used_math();
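
The fpu.c rework above replaces the old fcnvsd-only fixup with full denormal emulation: the trap handler decodes the faulting instruction, feeds the operands to the softfloat routines added below, writes the result back into the saved register file, and then mirrors the accumulated cause bits into FPSCR's sticky flag field before deciding whether a SIGFPE is still due. A standalone sketch of that mirroring step, assuming the usual SH-4 FPSCR layout (flag bits 2..6, enable bits 7..11, cause bits 12..17):

    #include <stdio.h>

    int main(void)
    {
            unsigned int fpscr = 0;
            unsigned int cause = 1u << 12;  /* inexact, as a cause-field bit */

            fpscr |= cause;                 /* record the cause bits */
            fpscr |= cause >> 10;           /* mirror: cause bit 12 -> flag bit 2 */

            printf("fpscr = %#x\n", fpscr); /* prints 0x1004 */
            return 0;
    }
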
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index bc9c28a69bf..f2b9238cda0 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -98,6 +98,8 @@ int __init detect_cpu_and_cache_system(void)
case 0x200A:
if (prr == 0x61)
boot_cpu_data.type = CPU_SH7781;
+ else if (prr == 0xa1)
+ boot_cpu_data.type = CPU_SH7763;
else
boot_cpu_data.type = CPU_SH7780;
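
The probe hunk tells SH7763 apart from SH7780/SH7781 purely by the product revision register within the 0x200A family. A hypothetical reduction of that decision (the PRR value would really come from the hardware register):

    #include <stdio.h>

    static const char *sh4a_0x200a_type(unsigned int prr)
    {
            if (prr == 0x61)
                    return "SH7781";
            if (prr == 0xa1)
                    return "SH7763";
            return "SH7780";        /* everything else in the family */
    }

    int main(void)
    {
            printf("%s\n", sh4a_0x200a_type(0xa1)); /* SH7763 */
            return 0;
    }
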
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 523f68a9ce0..ae3603aca61 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -126,12 +126,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(REF, REF_RCMI, REF_ROVI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF, 3),
- INTC_PRIO(SCI1, 3),
- INTC_PRIO(DMAC, 7),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
@@ -143,7 +137,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
@@ -163,7 +157,7 @@ static struct intc_group groups_dma4[] __initdata = {
static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
vectors_dma4, groups_dma4,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#endif
/* SH7750R and SH7751R both have 8-channel DMA controllers */
@@ -184,7 +178,7 @@ static struct intc_group groups_dma8[] __initdata = {
static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
vectors_dma8, groups_dma8,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
#endif
/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
@@ -205,7 +199,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
- vectors_tmu34, NULL, priorities,
+ vectors_tmu34, NULL,
mask_registers, prio_registers, NULL);
#endif
@@ -216,7 +210,7 @@ static struct intc_vect vectors_irlm[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
- priorities, NULL, prio_registers, NULL);
+ NULL, prio_registers, NULL);
/* SH7751 and SH7751R both have PCI */
#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
@@ -233,7 +227,7 @@ static struct intc_group groups_pci[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 7a898cb1d94..85f81579b97 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -92,15 +92,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(REF, REF_RCMI, REF_ROVI),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SIM, 3),
- INTC_PRIO(DMAC, 7),
- INTC_PRIO(DMABRG, 13),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
{ IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
@@ -132,7 +123,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
@@ -140,7 +131,7 @@ static struct intc_vect vectors_irq[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
static struct plat_sci_port sci_platform_data[] = {
{
diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
new file mode 100644
index 00000000000..7b2d337ee41
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
@@ -0,0 +1,892 @@
+/*
+ * Floating point emulation support for subnormalised numbers on SH4
+ * architecture. This file is derived from the SoftFloat IEC/IEEE
+ * Floating-point Arithmetic Package, Release 2 the original license of
+ * which is reproduced below.
+ *
+ * ========================================================================
+ *
+ * This C source file is part of the SoftFloat IEC/IEEE Floating-point
+ * Arithmetic Package, Release 2.
+ *
+ * Written by John R. Hauser. This work was made possible in part by the
+ * International Computer Science Institute, located at Suite 600, 1947 Center
+ * Street, Berkeley, California 94704. Funding was partially provided by the
+ * National Science Foundation under grant MIP-9311980. The original version
+ * of this code was written as part of a project to build a fixed-point vector
+ * processor in collaboration with the University of California at Berkeley,
+ * overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+ * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
+ * arithmetic/softfloat.html'.
+ *
+ * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
+ * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
+ * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
+ * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
+ * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
+ *
+ * Derivative works are acceptable, even for commercial purposes, so long as
+ * (1) they include prominent notice that the work is derivative, and (2) they
+ * include prominent notice akin to these three paragraphs for those parts of
+ * this code that are retained.
+ *
+ * ========================================================================
+ *
+ * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com>
+ * and Kamel Khelifi <kamel.khelifi@st.com>
+ */
+#include <linux/kernel.h>
+#include <asm/cpu/fpu.h>
+
+#define LIT64( a ) a##LL
+
+typedef char flag;
+typedef unsigned char uint8;
+typedef signed char int8;
+typedef int uint16;
+typedef int int16;
+typedef unsigned int uint32;
+typedef signed int int32;
+
+typedef unsigned long long int bits64;
+typedef signed long long int sbits64;
+
+typedef unsigned char bits8;
+typedef signed char sbits8;
+typedef unsigned short int bits16;
+typedef signed short int sbits16;
+typedef unsigned int bits32;
+typedef signed int sbits32;
+
+typedef unsigned long long int uint64;
+typedef signed long long int int64;
+
+typedef unsigned long int float32;
+typedef unsigned long long float64;
+
+extern void float_raise(unsigned int flags); /* in fpu.c */
+extern int float_rounding_mode(void); /* in fpu.c */
+
+inline bits64 extractFloat64Frac(float64 a);
+inline flag extractFloat64Sign(float64 a);
+inline int16 extractFloat64Exp(float64 a);
+inline int16 extractFloat32Exp(float32 a);
+inline flag extractFloat32Sign(float32 a);
+inline bits32 extractFloat32Frac(float32 a);
+inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
+inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
+inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
+inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
+float64 float64_sub(float64 a, float64 b);
+float32 float32_sub(float32 a, float32 b);
+float32 float32_add(float32 a, float32 b);
+float64 float64_add(float64 a, float64 b);
+float64 float64_div(float64 a, float64 b);
+float32 float32_div(float32 a, float32 b);
+float32 float32_mul(float32 a, float32 b);
+float64 float64_mul(float64 a, float64 b);
+inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr);
+inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr);
+inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
+
+static int8 countLeadingZeros32(bits32 a);
+static int8 countLeadingZeros64(bits64 a);
+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
+ bits64 zSig);
+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
+ bits32 zSig);
+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
+static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
+ bits64 * zSigPtr);
+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
+ bits32 * zSigPtr);
+
+inline bits64 extractFloat64Frac(float64 a)
+{
+ return a & LIT64(0x000FFFFFFFFFFFFF);
+}
+
+inline flag extractFloat64Sign(float64 a)
+{
+ return a >> 63;
+}
+
+inline int16 extractFloat64Exp(float64 a)
+{
+ return (a >> 52) & 0x7FF;
+}
+
+inline int16 extractFloat32Exp(float32 a)
+{
+ return (a >> 23) & 0xFF;
+}
+
+inline flag extractFloat32Sign(float32 a)
+{
+ return a >> 31;
+}
+
+inline bits32 extractFloat32Frac(float32 a)
+{
+ return a & 0x007FFFFF;
+}
+
+inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
+}
+
+inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
+{
+ bits64 z;
+
+ if (count == 0) {
+ z = a;
+ } else if (count < 64) {
+ z = (a >> count) | ((a << ((-count) & 63)) != 0);
+ } else {
+ z = (a != 0);
+ }
+ *zPtr = z;
+}
+
+static int8 countLeadingZeros32(bits32 a)
+{
+ static const int8 countLeadingZerosHigh[] = {
+ 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ int8 shiftCount;
+
+ shiftCount = 0;
+ if (a < 0x10000) {
+ shiftCount += 16;
+ a <<= 16;
+ }
+ if (a < 0x1000000) {
+ shiftCount += 8;
+ a <<= 8;
+ }
+ shiftCount += countLeadingZerosHigh[a >> 24];
+ return shiftCount;
+
+}
+
+static int8 countLeadingZeros64(bits64 a)
+{
+ int8 shiftCount;
+
+ shiftCount = 0;
+ if (a < ((bits64) 1) << 32) {
+ shiftCount += 32;
+ } else {
+ a >>= 32;
+ }
+ shiftCount += countLeadingZeros32(a);
+ return shiftCount;
+
+}
+
+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros64(zSig) - 1;
+ return roundAndPackFloat64(zSign, zExp - shiftCount,
+ zSig << shiftCount);
+
+}
+
+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 10;
+ bSig <<= 10;
+ if (0 < expDiff)
+ goto aExpBigger;
+ if (expDiff < 0)
+ goto bExpBigger;
+ if (aExp == 0) {
+ aExp = 1;
+ bExp = 1;
+ }
+ if (bSig < aSig)
+ goto aBigger;
+ if (aSig < bSig)
+ goto bBigger;
+ return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
+ bExpBigger:
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign ^ 1, 0x7FF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= LIT64(0x4000000000000000);
+ }
+ shift64RightJamming(aSig, -expDiff, &aSig);
+ bSig |= LIT64(0x4000000000000000);
+ bBigger:
+ zSig = bSig - aSig;
+ zExp = bExp;
+ zSign ^= 1;
+ goto normalizeRoundAndPack;
+ aExpBigger:
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= LIT64(0x4000000000000000);
+ }
+ shift64RightJamming(bSig, expDiff, &bSig);
+ aSig |= LIT64(0x4000000000000000);
+ aBigger:
+ zSig = aSig - bSig;
+ zExp = aExp;
+ normalizeRoundAndPack:
+ --zExp;
+ return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
+
+}
+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 9;
+ bSig <<= 9;
+ if (0 < expDiff) {
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= LIT64(0x2000000000000000);
+ }
+ shift64RightJamming(bSig, expDiff, &bSig);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= LIT64(0x2000000000000000);
+ }
+ shift64RightJamming(aSig, -expDiff, &aSig);
+ zExp = bExp;
+ } else {
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (aExp == 0)
+ return packFloat64(zSign, 0, (aSig + bSig) >> 9);
+ zSig = LIT64(0x4000000000000000) + aSig + bSig;
+ zExp = aExp;
+ goto roundAndPack;
+ }
+ aSig |= LIT64(0x2000000000000000);
+ zSig = (aSig + bSig) << 1;
+ --zExp;
+ if ((sbits64) zSig < 0) {
+ zSig = aSig + bSig;
+ ++zExp;
+ }
+ roundAndPack:
+ return roundAndPackFloat64(zSign, zExp, zSig);
+
+}
+
+inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
+}
+
+inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
+{
+ bits32 z;
+ if (count == 0) {
+ z = a;
+ } else if (count < 32) {
+ z = (a >> count) | ((a << ((-count) & 31)) != 0);
+ } else {
+ z = (a != 0);
+ }
+ *zPtr = z;
+}
+
+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ flag roundNearestEven;
+ int8 roundIncrement, roundBits;
+ flag isTiny;
+
+ /* SH4 has only 2 rounding modes - round to nearest and round to zero */
+ roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
+ roundIncrement = 0x40;
+ if (!roundNearestEven) {
+ roundIncrement = 0;
+ }
+ roundBits = zSig & 0x7F;
+ if (0xFD <= (bits16) zExp) {
+ if ((0xFD < zExp)
+ || ((zExp == 0xFD)
+ && ((sbits32) (zSig + roundIncrement) < 0))
+ ) {
+ float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
+ return packFloat32(zSign, 0xFF,
+ 0) - (roundIncrement == 0);
+ }
+ if (zExp < 0) {
+ isTiny = (zExp < -1)
+ || (zSig + roundIncrement < 0x80000000);
+ shift32RightJamming(zSig, -zExp, &zSig);
+ zExp = 0;
+ roundBits = zSig & 0x7F;
+ if (isTiny && roundBits)
+ float_raise(FPSCR_CAUSE_UNDERFLOW);
+ }
+ }
+ if (roundBits)
+ float_raise(FPSCR_CAUSE_INEXACT);
+ zSig = (zSig + roundIncrement) >> 7;
+ zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
+ if (zSig == 0)
+ zExp = 0;
+ return packFloat32(zSign, zExp, zSig);
+
+}
+
+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros32(zSig) - 1;
+ return roundAndPackFloat32(zSign, zExp - shiftCount,
+ zSig << shiftCount);
+}
+
+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ flag roundNearestEven;
+ int16 roundIncrement, roundBits;
+ flag isTiny;
+
+ /* SH4 has only 2 rounding modes - round to nearest and round to zero */
+ roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
+ roundIncrement = 0x200;
+ if (!roundNearestEven) {
+ roundIncrement = 0;
+ }
+ roundBits = zSig & 0x3FF;
+ if (0x7FD <= (bits16) zExp) {
+ if ((0x7FD < zExp)
+ || ((zExp == 0x7FD)
+ && ((sbits64) (zSig + roundIncrement) < 0))
+ ) {
+ float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
+ return packFloat64(zSign, 0x7FF,
+ 0) - (roundIncrement == 0);
+ }
+ if (zExp < 0) {
+ isTiny = (zExp < -1)
+ || (zSig + roundIncrement <
+ LIT64(0x8000000000000000));
+ shift64RightJamming(zSig, -zExp, &zSig);
+ zExp = 0;
+ roundBits = zSig & 0x3FF;
+ if (isTiny && roundBits)
+ float_raise(FPSCR_CAUSE_UNDERFLOW);
+ }
+ }
+ if (roundBits)
+ float_raise(FPSCR_CAUSE_INEXACT);
+ zSig = (zSig + roundIncrement) >> 10;
+ zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
+ if (zSig == 0)
+ zExp = 0;
+ return packFloat64(zSign, zExp, zSig);
+
+}
+
+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 7;
+ bSig <<= 7;
+ if (0 < expDiff)
+ goto aExpBigger;
+ if (expDiff < 0)
+ goto bExpBigger;
+ if (aExp == 0) {
+ aExp = 1;
+ bExp = 1;
+ }
+ if (bSig < aSig)
+ goto aBigger;
+ if (aSig < bSig)
+ goto bBigger;
+ return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
+ bExpBigger:
+ if (bExp == 0xFF) {
+ return packFloat32(zSign ^ 1, 0xFF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= 0x40000000;
+ }
+ shift32RightJamming(aSig, -expDiff, &aSig);
+ bSig |= 0x40000000;
+ bBigger:
+ zSig = bSig - aSig;
+ zExp = bExp;
+ zSign ^= 1;
+ goto normalizeRoundAndPack;
+ aExpBigger:
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= 0x40000000;
+ }
+ shift32RightJamming(bSig, expDiff, &bSig);
+ aSig |= 0x40000000;
+ aBigger:
+ zSig = aSig - bSig;
+ zExp = aExp;
+ normalizeRoundAndPack:
+ --zExp;
+ return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 6;
+ bSig <<= 6;
+ if (0 < expDiff) {
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= 0x20000000;
+ }
+ shift32RightJamming(bSig, expDiff, &bSig);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0xFF) {
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= 0x20000000;
+ }
+ shift32RightJamming(aSig, -expDiff, &aSig);
+ zExp = bExp;
+ } else {
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (aExp == 0)
+ return packFloat32(zSign, 0, (aSig + bSig) >> 6);
+ zSig = 0x40000000 + aSig + bSig;
+ zExp = aExp;
+ goto roundAndPack;
+ }
+ aSig |= 0x20000000;
+ zSig = (aSig + bSig) << 1;
+ --zExp;
+ if ((sbits32) zSig < 0) {
+ zSig = aSig + bSig;
+ ++zExp;
+ }
+ roundAndPack:
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float64 float64_sub(float64 a, float64 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat64Sign(a);
+ bSign = extractFloat64Sign(b);
+ if (aSign == bSign) {
+ return subFloat64Sigs(a, b, aSign);
+ } else {
+ return addFloat64Sigs(a, b, aSign);
+ }
+
+}
+
+float32 float32_sub(float32 a, float32 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat32Sign(a);
+ bSign = extractFloat32Sign(b);
+ if (aSign == bSign) {
+ return subFloat32Sigs(a, b, aSign);
+ } else {
+ return addFloat32Sigs(a, b, aSign);
+ }
+
+}
+
+float32 float32_add(float32 a, float32 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat32Sign(a);
+ bSign = extractFloat32Sign(b);
+ if (aSign == bSign) {
+ return addFloat32Sigs(a, b, aSign);
+ } else {
+ return subFloat32Sigs(a, b, aSign);
+ }
+
+}
+
+float64 float64_add(float64 a, float64 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat64Sign(a);
+ bSign = extractFloat64Sign(b);
+ if (aSign == bSign) {
+ return addFloat64Sigs(a, b, aSign);
+ } else {
+ return subFloat64Sigs(a, b, aSign);
+ }
+}
+
+static void
+normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros64(aSig) - 11;
+ *zSigPtr = aSig << shiftCount;
+ *zExpPtr = 1 - shiftCount;
+}
+
+inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr)
+{
+ bits64 z1;
+
+ z1 = a1 + b1;
+ *z1Ptr = z1;
+ *z0Ptr = a0 + b0 + (z1 < a1);
+}
+
+inline void
+sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr)
+{
+ *z1Ptr = a1 - b1;
+ *z0Ptr = a0 - b0 - (a1 < b1);
+}
+
+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
+{
+ bits64 b0, b1;
+ bits64 rem0, rem1, term0, term1;
+ bits64 z;
+ if (b <= a0)
+ return LIT64(0xFFFFFFFFFFFFFFFF);
+ b0 = b >> 32;
+ z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
+ mul64To128(b, z, &term0, &term1);
+ sub128(a0, a1, term0, term1, &rem0, &rem1);
+ while (((sbits64) rem0) < 0) {
+ z -= LIT64(0x100000000);
+ b1 = b << 32;
+ add128(rem0, rem1, b0, b1, &rem0, &rem1);
+ }
+ rem0 = (rem0 << 32) | (rem1 >> 32);
+ z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
+ return z;
+}
+
+inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
+{
+ bits32 aHigh, aLow, bHigh, bLow;
+ bits64 z0, zMiddleA, zMiddleB, z1;
+
+ aLow = a;
+ aHigh = a >> 32;
+ bLow = b;
+ bHigh = b >> 32;
+ z1 = ((bits64) aLow) * bLow;
+ zMiddleA = ((bits64) aLow) * bHigh;
+ zMiddleB = ((bits64) aHigh) * bLow;
+ z0 = ((bits64) aHigh) * bHigh;
+ zMiddleA += zMiddleB;
+ z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
+ zMiddleA <<= 32;
+ z1 += zMiddleA;
+ z0 += (z1 < zMiddleA);
+ *z1Ptr = z1;
+ *z0Ptr = z0;
+
+}
+
+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
+ bits32 * zSigPtr)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros32(aSig) - 8;
+ *zSigPtr = aSig << shiftCount;
+ *zExpPtr = 1 - shiftCount;
+
+}
+
+float64 float64_div(float64 a, float64 b)
+{
+ flag aSign, bSign, zSign;
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ bits64 rem0, rem1;
+ bits64 term0, term1;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ aSign = extractFloat64Sign(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ bSign = extractFloat64Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0x7FF) {
+ if (bExp == 0x7FF) {
+ }
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign, 0, 0);
+ }
+ if (bExp == 0) {
+ if (bSig == 0) {
+ if ((aExp | aSig) == 0) {
+ float_raise(FPSCR_CAUSE_INVALID);
+ }
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ normalizeFloat64Subnormal(bSig, &bExp, &bSig);
+ }
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(aSig, &aExp, &aSig);
+ }
+ zExp = aExp - bExp + 0x3FD;
+ aSig = (aSig | LIT64(0x0010000000000000)) << 10;
+ bSig = (bSig | LIT64(0x0010000000000000)) << 11;
+ if (bSig <= (aSig + aSig)) {
+ aSig >>= 1;
+ ++zExp;
+ }
+ zSig = estimateDiv128To64(aSig, 0, bSig);
+ if ((zSig & 0x1FF) <= 2) {
+ mul64To128(bSig, zSig, &term0, &term1);
+ sub128(aSig, 0, term0, term1, &rem0, &rem1);
+ while ((sbits64) rem0 < 0) {
+ --zSig;
+ add128(rem0, rem1, 0, bSig, &rem0, &rem1);
+ }
+ zSig |= (rem1 != 0);
+ }
+ return roundAndPackFloat64(zSign, zExp, zSig);
+
+}
+
+float32 float32_div(float32 a, float32 b)
+{
+ flag aSign, bSign, zSign;
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ aSign = extractFloat32Sign(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ bSign = extractFloat32Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0xFF) {
+ if (bExp == 0xFF) {
+ }
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ if (bExp == 0xFF) {
+ return packFloat32(zSign, 0, 0);
+ }
+ if (bExp == 0) {
+ if (bSig == 0) {
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ normalizeFloat32Subnormal(bSig, &bExp, &bSig);
+ }
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(aSig, &aExp, &aSig);
+ }
+ zExp = aExp - bExp + 0x7D;
+ aSig = (aSig | 0x00800000) << 7;
+ bSig = (bSig | 0x00800000) << 8;
+ if (bSig <= (aSig + aSig)) {
+ aSig >>= 1;
+ ++zExp;
+ }
+ zSig = (((bits64) aSig) << 32) / bSig;
+ if ((zSig & 0x3F) == 0) {
+ zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
+ }
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float32 float32_mul(float32 a, float32 b)
+{
+ char aSign, bSign, zSign;
+ int aExp, bExp, zExp;
+ unsigned int aSig, bSig;
+ unsigned long long zSig64;
+ unsigned int zSig;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ aSign = extractFloat32Sign(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ bSign = extractFloat32Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(aSig, &aExp, &aSig);
+ }
+ if (bExp == 0) {
+ if (bSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(bSig, &bExp, &bSig);
+ }
+ if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
+ return roundAndPackFloat32(zSign, 0xff, 0);
+
+ zExp = aExp + bExp - 0x7F;
+ aSig = (aSig | 0x00800000) << 7;
+ bSig = (bSig | 0x00800000) << 8;
+ shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
+ zSig = zSig64;
+ if (0 <= (signed int)(zSig << 1)) {
+ zSig <<= 1;
+ --zExp;
+ }
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float64 float64_mul(float64 a, float64 b)
+{
+ char aSign, bSign, zSign;
+ int aExp, bExp, zExp;
+ unsigned long long int aSig, bSig, zSig0, zSig1;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ aSign = extractFloat64Sign(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ bSign = extractFloat64Sign(b);
+ zSign = aSign ^ bSign;
+
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(aSig, &aExp, &aSig);
+ }
+ if (bExp == 0) {
+ if (bSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(bSig, &bExp, &bSig);
+ }
+ if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
+ return roundAndPackFloat64(zSign, 0x7ff, 0);
+
+ zExp = aExp + bExp - 0x3FF;
+ aSig = (aSig | 0x0010000000000000LL) << 10;
+ bSig = (bSig | 0x0010000000000000LL) << 11;
+ mul64To128(aSig, bSig, &zSig0, &zSig1);
+ zSig0 |= (zSig1 != 0);
+ if (0 <= (signed long long int)(zSig0 << 1)) {
+ zSig0 <<= 1;
+ --zExp;
+ }
+ return roundAndPackFloat64(zSign, zExp, zSig0);
+}
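
Most of softfloat.c turns on one trick: when a significand is shifted right, any bits that fall off are OR-ed into the lowest result bit (the "jamming" in shift64RightJamming/shift32RightJamming), so the later rounding step can still tell the value was inexact. A standalone illustration of the same idea:

    #include <stdio.h>
    #include <stdint.h>

    /* Same idea as shift64RightJamming() above. */
    static uint64_t shift_right_jam(uint64_t a, int count)
    {
            if (count == 0)
                    return a;
            if (count < 64)
                    return (a >> count) | ((a << ((-count) & 63)) != 0);
            return a != 0;
    }

    int main(void)
    {
            /* 0xf0f >> 4 would lose the low 0xf, so the sticky bit is set. */
            printf("%#llx\n", (unsigned long long)shift_right_jam(0xf0f, 4));
            /* prints 0xf1: 0xf0 from the shift, |1 for the lost bits */
            return 0;
    }
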
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index b22a78c807e..3008c00eea6 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -341,17 +341,18 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
unsigned int cpu = sysdev->id;
struct kobject *kobj;
+ int error;
sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (unlikely(!sq_kobject[cpu]))
return -ENOMEM;
kobj = sq_kobject[cpu];
- kobj->parent = &sysdev->kobj;
- kobject_set_name(kobj, "%s", "sq");
- kobj->ktype = &ktype_percpu_entry;
-
- return kobject_register(kobj);
+ error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
+ "%s", "sq");
+ if (!error)
+ kobject_uevent(kobj, KOBJ_ADD);
+ return error;
}
static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
@@ -359,7 +360,7 @@ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
unsigned int cpu = sysdev->id;
struct kobject *kobj = sq_kobject[cpu];
- kobject_unregister(kobj);
+ kobject_put(kobj);
return 0;
}
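
The sq.c hunk is part of the tree-wide move away from kobject_register()/kobject_unregister(): initialization, naming, and the ADD uevent become explicit, and teardown is a plain kobject_put(). A sketch of the replacement lifecycle, where the ktype and parent stand in for ktype_percpu_entry and &sysdev->kobj; per the kobject documentation, a failed kobject_init_and_add() still wants a kobject_put() so the release path runs:

    #include <linux/kobject.h>
    #include <linux/slab.h>

    static int example_add(struct kobj_type *ktype, struct kobject *parent)
    {
            struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
            int error;

            if (!kobj)
                    return -ENOMEM;

            error = kobject_init_and_add(kobj, ktype, parent, "%s", "sq");
            if (error) {
                    kobject_put(kobj);      /* drops the ref; release frees */
                    return error;
            }
            kobject_uevent(kobj, KOBJ_ADD); /* announce to userspace */
            return 0;
    }
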
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 24539873943..08ac6387bf1 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -3,6 +3,7 @@
#
# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
smp-$(CONFIG_CPU_SUBTYPE_SHX3) := smp-shx3.o
# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
new file mode 100644
index 00000000000..45889d412c8
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -0,0 +1,126 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+ *
+ * SH7763 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2007 Yoshihiro Shimoda
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
+static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
+static int p1fc_divisors[] = { 1, 1, 1, 16, 1, 1, 1, 1 };
+static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
+}
+
+static struct clk_ops sh7763_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
+ clk->rate = clk->parent->rate / p0fc_divisors[idx];
+}
+
+static struct clk_ops sh7763_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
+ clk->rate = clk->parent->rate / bfc_divisors[idx];
+}
+
+static struct clk_ops sh7763_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7763_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7763_clk_ops[] = {
+ &sh7763_master_clk_ops,
+ &sh7763_module_clk_ops,
+ &sh7763_bus_clk_ops,
+ &sh7763_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7763_clk_ops))
+ *ops = sh7763_clk_ops[idx];
+}
+
+static void shyway_clk_recalc(struct clk *clk)
+{
+ int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
+ clk->rate = clk->parent->rate / cfc_divisors[idx];
+}
+
+static struct clk_ops sh7763_shyway_clk_ops = {
+ .recalc = shyway_clk_recalc,
+};
+
+static struct clk sh7763_shyway_clk = {
+ .name = "shyway_clk",
+ .flags = CLK_ALWAYS_ENABLED,
+ .ops = &sh7763_shyway_clk_ops,
+};
+
+/*
+ * Additional SH7763-specific on-chip clocks that aren't already part of the
+ * clock framework
+ */
+static struct clk *sh7763_onchip_clocks[] = {
+ &sh7763_shyway_clk,
+};
+
+static int __init sh7763_clk_init(void)
+{
+ struct clk *clk = clk_get(NULL, "master_clk");
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
+ struct clk *clkp = sh7763_onchip_clocks[i];
+
+ clkp->parent = clk;
+ clk_register(clkp);
+ clk_enable(clkp);
+ }
+
+ /*
+ * Now that we have the rest of the clocks registered, we need to
+ * force the parent clock to propagate so that these clocks will
+ * automatically figure out their rate. We cheat by handing the
+ * parent clock its current rate and forcing child propagation.
+ */
+ clk_set_rate(clk, clk_get_rate(clk));
+
+ clk_put(clk);
+
+ return 0;
+}
+
+arch_initcall(sh7763_clk_init);
+
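
The SH7763 clock ops above all reduce to a table lookup on an FRQCR bit-field: master_clk_init() multiplies the input rate up by the P0 divisor, and module_clk_recalc() divides it back down. A worked example with hypothetical register and input-clock values, using the P0 divisor table from the file:

    #include <stdio.h>

    static const int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };

    int main(void)
    {
            unsigned int frqcr = 0x30;              /* hypothetical FRQCR value */
            int idx = (frqcr >> 4) & 0x07;          /* P0 field -> 3 */
            long peripheral_hz = 33333333L;         /* hypothetical input clock */
            long master_hz = peripheral_hz * p0fc_divisors[idx];

            printf("master %ld Hz, module %ld Hz\n",
                   master_hz, master_hz / p0fc_divisors[idx]);
            return 0;
    }
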
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index b9c6547c4a9..73c778d40d1 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -157,14 +157,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(TMU0, 2),
- INTC_PRIO(TMU1, 2),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ } },
@@ -217,7 +209,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups,
mask_registers, prio_registers, sense_registers);
void __init plat_irq_setup(void)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
new file mode 100644
index 00000000000..eabd5386812
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -0,0 +1,390 @@
+/*
+ * SH7763 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2007 Yoshihiro Shimoda
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+#include <asm/sci.h>
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffe80000,
+ .end = 0xffe80000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 21,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 22,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ }, {
+ .mapbase = 0xffe08000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 77, 79, 78 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xffec8000,
+ .end = 0xffec80ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 83,
+ .end = 83,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 usb_ohci_dma_mask = 0xffffffffUL;
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct resource usbf_resources[] = {
+ [0] = {
+ .start = 0xffec0000,
+ .end = 0xffec00ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 84,
+ .end = 84,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbf_device = {
+ .name = "sh_udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usbf_resources),
+ .resource = usbf_resources,
+};
+
+static struct platform_device *sh7763_devices[] __initdata = {
+ &rtc_device,
+ &sci_device,
+ &usb_ohci_device,
+ &usbf_device,
+};
+
+static int __init sh7763_devices_setup(void)
+{
+ return platform_add_devices(sh7763_devices,
+ ARRAY_SIZE(sh7763_devices));
+}
+__initcall(sh7763_devices_setup);
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ RTC_ATI, RTC_PRI, RTC_CUI,
+ WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, LCDC,
+ DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
+ SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
+ DMAC0_DMINT4, DMAC0_DMINT5,
+ IIC0, IIC1,
+ CMT,
+ GEINT0, GEINT1, GEINT2,
+ HAC,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
+ PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
+ STIF0, STIF1,
+ SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
+ SIOF0, SIOF1, SIOF2,
+ USBH, USBFI0, USBFI1,
+ TPU, PCC,
+ MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
+ SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND,
+ TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
+ SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
+ GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3,
+
+ /* interrupt groups */
+
+ TMU012, TMU345, RTC, DMAC, SCIF0, GETHER, PCIC5,
+ SCIF1, USBF, MMCIF, SIM, SCIF2, GPIO,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
+ INTC_VECT(RTC_CUI, 0x4c0),
+ INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
+ INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
+ INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
+ INTC_VECT(LCDC, 0x620),
+ INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
+ INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
+ INTC_VECT(DMAC0_DMAE, 0x6c0),
+ INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
+ INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
+ INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
+	INTC_VECT(IIC0, 0x8a0), INTC_VECT(IIC1, 0x8c0),
+ INTC_VECT(CMT, 0x900), INTC_VECT(GEINT0, 0x920),
+ INTC_VECT(GEINT1, 0x940), INTC_VECT(GEINT2, 0x960),
+ INTC_VECT(HAC, 0x980),
+ INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
+ INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
+ INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
+ INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
+ INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
+ INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
+ INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
+ INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
+ INTC_VECT(USBH, 0xc60), INTC_VECT(USBFI0, 0xc80),
+ INTC_VECT(USBFI1, 0xca0),
+ INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
+ INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
+ INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
+ INTC_VECT(SIM_ERI, 0xd80), INTC_VECT(SIM_RXI, 0xda0),
+ INTC_VECT(SIM_TXI, 0xdc0), INTC_VECT(SIM_TEND, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
+ INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
+ INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
+	INTC_VECT(SCIF2_ERI, 0xf00), INTC_VECT(SCIF2_RXI, 0xf20),
+	INTC_VECT(SCIF2_BRI, 0xf40), INTC_VECT(SCIF2_TXI, 0xf60),
+ INTC_VECT(GPIO_CH0, 0xf80), INTC_VECT(GPIO_CH1, 0xfa0),
+ INTC_VECT(GPIO_CH2, 0xfc0), INTC_VECT(GPIO_CH3, 0xfe0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+ INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
+ INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
+ DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
+ INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
+ INTC_GROUP(GETHER, GEINT0, GEINT1, GEINT2),
+ INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
+ INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
+ INTC_GROUP(USBF, USBFI0, USBFI1),
+ INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
+ INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND),
+ INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
+ INTC_GROUP(GPIO, GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, 0, 0, 0, GPIO, 0,
+ SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB,
+ PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC,
+ HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
+ { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
+ { 0, 0, 0, 0, 0, 0, SCIF2, USBF,
+ 0, 0, STIF1, STIF0, 0, 0, USBH, GETHER,
+ PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1,
+ LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
+ TMU2, TMU2_TICPI } },
+ { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
+ { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
+ { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } },
+ { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
+ PCISERR, PCIINTA } },
+ { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
+ PCIINTD, PCIC5 } },
+ { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } },
+ { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } },
+ { 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } },
+ { 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } },
+ { 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } },
+ { 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } },
+ { 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } },
+ { 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups,
+			 mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+
+static struct intc_vect irq_vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+ INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+};
+
+static struct intc_mask_reg irq_mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg irq_prio_registers[] __initdata = {
+ { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg irq_sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC(intc_irq_desc, "sh7763-irq", irq_vectors,
+			 NULL, irq_mask_registers, irq_prio_registers,
+			 irq_sense_registers);
+
+/* External interrupt pins in IRL mode */
+
+static struct intc_vect irl_vectors[] __initdata = {
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
+};
+
+static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
+			 NULL, irl7654_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
+			 NULL, irl3210_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ7-0 */
+ ctrl_outl(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ ctrl_outl(0xc0000000, INTC_INTMSK1);
+ ctrl_outl(0xfffefffe, INTC_INTMSK2);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ /* select IRQ mode for IRL3-0 + IRL7-4 */
+ ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ register_intc_controller(&intc_irq_desc);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl7654_desc);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl3210_desc);
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index e8fd33ff060..293004b526f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -168,11 +168,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
@@ -195,7 +190,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
@@ -223,7 +218,7 @@ static struct intc_sense_reg irq_sense_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_irq_desc, "sh7780-irq", irq_vectors,
- NULL, NULL, irq_mask_registers, irq_prio_registers,
+ NULL, irq_mask_registers, irq_prio_registers,
irq_sense_registers);
/* External interrupt pins in IRL mode */
@@ -257,10 +252,10 @@ static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
- NULL, NULL, irl7654_mask_registers, NULL, NULL);
+ NULL, irl7654_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
- NULL, NULL, irl3210_mask_registers, NULL, NULL);
+ NULL, irl3210_mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 39b215d6cee..74b60e96cdf 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -178,15 +178,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF3, 3),
- INTC_PRIO(SCIF4, 3),
- INTC_PRIO(SCIF5, 3),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
@@ -227,7 +218,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
@@ -248,11 +239,11 @@ static struct intc_sense_reg sense_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq0123, "sh7785-irq0123", vectors_irq0123,
- NULL, NULL, mask_registers, prio_registers,
+ NULL, mask_registers, prio_registers,
sense_registers);
static DECLARE_INTC_DESC(intc_desc_irq4567, "sh7785-irq4567", vectors_irq4567,
- NULL, NULL, mask_registers, prio_registers,
+ NULL, mask_registers, prio_registers,
sense_registers);
/* External interrupt pins in IRL mode */
@@ -280,10 +271,10 @@ static struct intc_vect vectors_irl4567[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123,
- NULL, NULL, mask_registers, NULL, NULL);
+ NULL, mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
- NULL, NULL, mask_registers, NULL, NULL);
+ NULL, mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index c6cdd7e3b04..4dc958b6b31 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -165,13 +165,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
};
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
- INTC_PRIO(SCIF3, 3),
-};
-
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3 } },
@@ -218,7 +211,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) },
};
-static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
@@ -232,8 +225,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
- priorities, mask_registers, prio_registers,
- sense_registers);
+ mask_registers, prio_registers, sense_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl[] __initdata = {
@@ -248,7 +240,7 @@ static struct intc_vect vectors_irl[] __initdata = {
};
static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
- priorities, mask_registers, prio_registers, NULL);
+ mask_registers, prio_registers, NULL);
void __init plat_irq_setup_pins(int mode)
{
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
new file mode 100644
index 00000000000..8646363e9de
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux/SuperH SH-5 backends.
+#
+obj-y := entry.o probe.o switchto.o
+
+obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_KALLSYMS) += unwind.o
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
new file mode 100644
index 00000000000..ba8750176d9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -0,0 +1,2101 @@
+/*
+ * arch/sh/kernel/cpu/sh5/entry.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/sys.h>
+#include <asm/cpu/registers.h>
+#include <asm/processor.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * SR fields.
+ */
+#define SR_ASID_MASK 0x00ff0000
+#define SR_FD_MASK 0x00008000
+#define SR_SS 0x08000000
+#define SR_BL 0x10000000
+#define SR_MD 0x40000000
+
+/*
+ * Event code.
+ */
+#define EVENT_INTERRUPT 0
+#define EVENT_FAULT_TLB 1
+#define EVENT_FAULT_NOT_TLB 2
+#define EVENT_DEBUG 3
+
+/* EXPEVT values */
+#define RESET_CAUSE 0x20
+#define DEBUGSS_CAUSE 0x980
+
+/*
+ * Frame layout. Quad index.
+ */
+#define FRAME_T(x) FRAME_TBASE+(x*8)
+#define FRAME_R(x) FRAME_RBASE+(x*8)
+#define FRAME_S(x) FRAME_SBASE+(x*8)
+#define FSPC 0
+#define FSSR 1
+#define FSYSCALL_ID 2
+
+/* Arrange the save frame to be a multiple of 32 bytes long */
+#define FRAME_SBASE 0
+#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
+#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
+#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 - tr7 */
+#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
+
+#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
+#define FP_FRAME_BASE 0
+
+#define SAVED_R2 0*8
+#define SAVED_R3 1*8
+#define SAVED_R4 2*8
+#define SAVED_R5 3*8
+#define SAVED_R18 4*8
+#define SAVED_R6 5*8
+#define SAVED_TR0 6*8
+
+/* These are the registers saved in the TLB path that aren't saved in the first
+ level of the normal one. */
+#define TLB_SAVED_R25 7*8
+#define TLB_SAVED_TR1 8*8
+#define TLB_SAVED_TR2 9*8
+#define TLB_SAVED_TR3 10*8
+#define TLB_SAVED_TR4 11*8
+/* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and
+   -ffixed-r1, causing breakage otherwise. */
+#define TLB_SAVED_R0 12*8
+#define TLB_SAVED_R1 13*8
+
+#define CLI() \
+ getcon SR, r6; \
+ ori r6, 0xf0, r6; \
+ putcon r6, SR;
+
+#define STI() \
+ getcon SR, r6; \
+ andi r6, ~0xf0, r6; \
+ putcon r6, SR;
+
+#ifdef CONFIG_PREEMPT
+# define preempt_stop() CLI()
+#else
+# define preempt_stop()
+# define resume_kernel restore_all
+#endif
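
CLI() and STI() operate purely on the IMASK field of SR (bits 7:4); in C terms they amount to the sketch below. sh5_cli_sr/sh5_sti_sr are illustrative names, not kernel helpers.

    /* Sketch: the SR transformations performed by CLI()/STI() above */
    static inline unsigned long long sh5_cli_sr(unsigned long long sr)
    {
    	return sr | 0xf0;	/* IMASK = 15: mask all interrupt levels */
    }

    static inline unsigned long long sh5_sti_sr(unsigned long long sr)
    {
    	return sr & ~0xf0ULL;	/* IMASK = 0: accept all interrupt levels */
    }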
+
+ .section .data, "aw"
+
+#define FAST_TLBMISS_STACK_CACHELINES 4
+#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
+
+/* Register back-up area for all exceptions */
+ .balign 32
+ /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
+ * register saves etc. */
+ .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
+/* This is 32 byte aligned by construction */
+/* Register back-up area for all exceptions */
+reg_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+
+/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
+ * reentrancy. Note this area may be accessed via physical address.
+ * Align so this fits a whole single cache line, for ease of purging.
+ */
+ .balign 32,0,32
+resvec_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .balign 32,0,32
+
+/* Jump table of 3rd level handlers */
+trap_jtable:
+ .long do_exception_error /* 0x000 */
+ .long do_exception_error /* 0x020 */
+ .long tlb_miss_load /* 0x040 */
+ .long tlb_miss_store /* 0x060 */
+ ! ARTIFICIAL pseudo-EXPEVT setting
+ .long do_debug_interrupt /* 0x080 */
+ .long tlb_miss_load /* 0x0A0 */
+ .long tlb_miss_store /* 0x0C0 */
+ .long do_address_error_load /* 0x0E0 */
+ .long do_address_error_store /* 0x100 */
+#ifdef CONFIG_SH_FPU
+ .long do_fpu_error /* 0x120 */
+#else
+ .long do_exception_error /* 0x120 */
+#endif
+ .long do_exception_error /* 0x140 */
+ .long system_call /* 0x160 */
+ .long do_reserved_inst /* 0x180 */
+ .long do_illegal_slot_inst /* 0x1A0 */
+ .long do_exception_error /* 0x1C0 - NMI */
+ .long do_exception_error /* 0x1E0 */
+ .rept 15
+ .long do_IRQ /* 0x200 - 0x3C0 */
+ .endr
+ .long do_exception_error /* 0x3E0 */
+ .rept 32
+ .long do_IRQ /* 0x400 - 0x7E0 */
+ .endr
+ .long fpu_error_or_IRQA /* 0x800 */
+ .long fpu_error_or_IRQB /* 0x820 */
+ .long do_IRQ /* 0x840 */
+ .long do_IRQ /* 0x860 */
+ .rept 6
+ .long do_exception_error /* 0x880 - 0x920 */
+ .endr
+ .long do_software_break_point /* 0x940 */
+ .long do_exception_error /* 0x960 */
+ .long do_single_step /* 0x980 */
+
+ .rept 3
+ .long do_exception_error /* 0x9A0 - 0x9E0 */
+ .endr
+ .long do_IRQ /* 0xA00 */
+ .long do_IRQ /* 0xA20 */
+ .long itlb_miss_or_IRQ /* 0xA40 */
+ .long do_IRQ /* 0xA60 */
+ .long do_IRQ /* 0xA80 */
+ .long itlb_miss_or_IRQ /* 0xAA0 */
+ .long do_exception_error /* 0xAC0 */
+ .long do_address_error_exec /* 0xAE0 */
+ .rept 8
+ .long do_exception_error /* 0xB00 - 0xBE0 */
+ .endr
+ .rept 18
+ .long do_IRQ /* 0xC00 - 0xE20 */
+ .endr
+
+ .section .text64, "ax"
+
+/*
+ * --- Exception/Interrupt/Event Handling Section
+ */
+
+/*
+ * VBR and RESVEC blocks.
+ *
+ * First level handler for VBR-based exceptions.
+ *
+ * To avoid waste of space, align to the maximum text block size.
+ * This is assumed to be at most 128 bytes or 32 instructions.
+ * DO NOT EXCEED 32 instructions on the first level handlers !
+ *
+ * Also note that RESVEC is contained within the VBR block
+ * where the room left (1KB - TEXT_SIZE) allows placing
+ * the RESVEC block (at most 512B + TEXT_SIZE).
+ *
+ * So first (and only) level handler for RESVEC-based exceptions.
+ *
+ * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
+ * and interrupt) we are a lot tight with register space until
+ * saving onto the stack frame, which is done in handle_exception().
+ *
+ */
+
+#define TEXT_SIZE 128
+#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
+
+ .balign TEXT_SIZE
+LVBR_block:
+ .space 256, 0 /* Power-on class handler, */
+ /* not required here */
+not_a_tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+ ! VBR+0x200
+ nop
+ .balign 256
+ ! VBR+0x300
+ nop
+ .balign 256
+ /*
+ * Instead of the natural .balign 1024 place RESVEC here
+ * respecting the final 1KB alignment.
+ */
+ .balign TEXT_SIZE
+ /*
+ * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
+ * block making sure the final alignment is correct.
+ */
+tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, KCR1
+ movi reg_save_area, SP
+ /* SP is guaranteed 32-byte aligned. */
+ st.q SP, TLB_SAVED_R0 , r0
+ st.q SP, TLB_SAVED_R1 , r1
+ st.q SP, SAVED_R2 , r2
+ st.q SP, SAVED_R3 , r3
+ st.q SP, SAVED_R4 , r4
+ st.q SP, SAVED_R5 , r5
+ st.q SP, SAVED_R6 , r6
+ st.q SP, SAVED_R18, r18
+
+ /* Save R25 for safety; as/ld may want to use it to achieve the call to
+ * the code in mm/tlbmiss.c */
+ st.q SP, TLB_SAVED_R25, r25
+ gettr tr0, r2
+ gettr tr1, r3
+ gettr tr2, r4
+ gettr tr3, r5
+ gettr tr4, r18
+ st.q SP, SAVED_TR0 , r2
+ st.q SP, TLB_SAVED_TR1 , r3
+ st.q SP, TLB_SAVED_TR2 , r4
+ st.q SP, TLB_SAVED_TR3 , r5
+ st.q SP, TLB_SAVED_TR4 , r18
+
+ pt do_fast_page_fault, tr0
+ getcon SSR, r2
+ getcon EXPEVT, r3
+ getcon TEA, r4
+ shlri r2, 30, r2
+ andi r2, 1, r2 /* r2 = SSR.MD */
+ blink tr0, LINK
+
+ pt fixup_to_invoke_general_handler, tr1
+
+ /* If the fast path handler fixed the fault, just drop through quickly
+ to the restore code right away to return to the excepting context.
+ */
+ beqi/u r2, 0, tr1
+
+fast_tlb_miss_restore:
+ ld.q SP, SAVED_TR0, r2
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+
+ ptabs r2, tr0
+ ptabs r3, tr1
+ ptabs r4, tr2
+ ptabs r5, tr3
+ ptabs r18, tr4
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+ ld.q SP, SAVED_R2, r2
+ ld.q SP, SAVED_R3, r3
+ ld.q SP, SAVED_R4, r4
+ ld.q SP, SAVED_R5, r5
+ ld.q SP, SAVED_R6, r6
+ ld.q SP, SAVED_R18, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ getcon KCR1, SP
+ rte
+ nop /* for safety, in case the code is run on sh5-101 cut1.x */
+
+fixup_to_invoke_general_handler:
+
+ /* OK, new method. Restore stuff that's not expected to get saved into
+ the 'first-level' reg save area, then just fall through to setting
+ up the registers and calling the second-level handler. */
+
+ /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
+ r25,tr1-4 and save r6 to get into the right state. */
+
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+
+ ptabs/u r3, tr1
+ ptabs/u r4, tr2
+ ptabs/u r5, tr3
+ ptabs/u r18, tr4
+
+ /* Set args for Non-debug, TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
+ DOES END UP AT VBR+0x600 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .balign 256
+ /* VBR + 0x600 */
+
+interrupt:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for interrupt class handler */
+ getcon INTEVT, r2
+ movi ret_from_irq, r3
+ ori r3, 1, r3
+ movi EVENT_INTERRUPT, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+ .balign TEXT_SIZE /* let's waste the bare minimum */
+
+LVBR_block_end: /* Marker. Used for total checking */
+
+ .balign 256
+LRESVEC_block:
+ /* Panic handler. Called with MMU off. Possible causes/actions:
+ * - Reset: Jump to program start.
+ * - Single Step: Turn off Single Step & return.
+ * - Others: Call panic handler, passing PC as arg.
+ * (this may need to be extended...)
+ */
+reset_or_panic:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, DCR
+ /* First save r0-1 and tr0, as we need to use these */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ gettr tr0, r0
+ st.q SP, 32, r0
+
+ /* Check cause */
+ getcon EXPEVT, r0
+ movi RESET_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if reset */
+ movi _stext-CONFIG_PAGE_OFFSET, r0
+ ori r0, 1, r0
+ ptabs r0, tr0
+ beqi r1, 0, tr0 /* Jump to start address if reset */
+
+ getcon EXPEVT, r0
+ movi DEBUGSS_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if single step */
+ pta single_step_panic, tr0
+ beqi r1, 0, tr0 /* jump if single step */
+
+ /* Now jump to where we save the registers. */
+ movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ blink tr0, r63
+
+single_step_panic:
+ /* We are in a handler with Single Step set. We need to resume the
+ * handler, by turning on MMU & turning off Single Step. */
+ getcon SSR, r0
+ movi SR_MMU, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Restore EXPEVT, as the rte won't do this */
+ getcon PEXPEVT, r0
+ putcon r0, EXPEVT
+ /* Restore regs */
+ ld.q SP, 32, r0
+ ptabs r0, tr0
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+ getcon DCR, SP
+ synco
+ rte
+
+
+ .balign 256
+debug_exception:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /*
+ * Single step/software_break_point first level handler.
+ * Called with MMU off, so the first thing we do is enable it
+ * by doing an rte with appropriate SSR.
+ */
+ putcon SP, DCR
+ /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+
+ /* With the MMU off, we are bypassing the cache, so purge any
+ * data that will be made stale by the following stores.
+ */
+ ocbp SP, 0
+ synco
+
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ getcon SPC, r0
+ st.q SP, 16, r0
+ getcon SSR, r0
+ st.q SP, 24, r0
+
+ /* Enable MMU, block exceptions, set priv mode, disable single step */
+ movi SR_MMU | SR_BL | SR_MD, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Force control to debug_exception_2 when rte is executed */
+	movi	debug_exception_2, r0
+ ori r0, 1, r0 /* force SHmedia, just in case */
+ putcon r0, SPC
+ getcon DCR, SP
+ synco
+ rte
+debug_exception_2:
+ /* Restore saved regs */
+ putcon SP, KCR1
+ movi resvec_save_area, SP
+ ld.q SP, 24, r0
+ putcon r0, SSR
+ ld.q SP, 16, r0
+ putcon r0, SPC
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for debug class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_DEBUG, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+debug_interrupt:
+ /* !!! WE COME HERE IN REAL MODE !!! */
+ /* Hook-up debug interrupt to allow various debugging options to be
+ * hooked into its handler. */
+ /* Save original stack pointer into KCR1 */
+ synco
+ putcon SP, KCR1
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ ocbp SP, 0
+ ocbp SP, 32
+ synco
+
+	/* Save other original registers into resvec_save_area via real addresses */
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* move (spc,ssr)->(pspc,pssr). The rte will shift
+ them back again, so that they look like the originals
+ as far as the real handler code is concerned. */
+ getcon spc, r6
+ putcon r6, pspc
+ getcon ssr, r6
+ putcon r6, pssr
+
+ ! construct useful SR for handle_exception
+ movi 3, r6
+ shlli r6, 30, r6
+ getcon sr, r18
+ or r18, r6, r6
+ putcon r6, ssr
+
+ ! SSR is now the current SR with the MD and MMU bits set
+ ! i.e. the rte will switch back to priv mode and put
+ ! the mmu back on
+
+ ! construct spc
+ movi handle_exception, r18
+ ori r18, 1, r18 ! for safety (do we need this?)
+ putcon r18, spc
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+
+ ! EXPEVT==0x80 is unused, so 'steal' this value to put the
+ ! debug interrupt handler in the vectoring table
+ movi 0x80, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+
+ or SP, ZERO, r5
+ movi CONFIG_PAGE_OFFSET, r6
+ add r6, r5, r5
+ getcon KCR1, SP
+
+ synco ! for safety
+ rte ! -> handle_exception, switch back to priv mode again
+
+LRESVEC_block_end: /* Marker. Unused. */
+
+ .balign TEXT_SIZE
+
+/*
+ * Second level handler for VBR-based exceptions. Pre-handler.
+ * Common to all stack-frame-sensitive handlers.
+ *
+ * Inputs:
+ * (KCR0) Current [current task union]
+ * (KCR1) Original SP
+ * (r2) INTEVT/EXPEVT
+ * (r3) appropriate return address
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
+ * (r5) Pointer to reg_save_area
+ * (SP) Original SP
+ *
+ * Available registers:
+ * (r6)
+ * (r18)
+ * (tr0)
+ *
+ */
+handle_exception:
+ /* Common 2nd level handler. */
+
+ /* First thing we need an appropriate stack pointer */
+ getcon SSR, r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta stack_ok, tr0
+ bne r6, ZERO, tr0 /* Original stack pointer is fine */
+
+ /* Set stack pointer for user fault */
+ getcon KCR0, SP
+ movi THREAD_SIZE, r6 /* Point to the end */
+ add SP, r6, SP
+
+stack_ok:
+
+/* DEBUG : check for underflow/overflow of the kernel stack */
+ pta no_underflow, tr0
+ getcon KCR0, r6
+ movi 1024, r18
+ add r6, r18, r6
+ bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
+
+/* Just panic to cause a crash. */
+bad_sp:
+ ld.b r63, 0, r6
+ nop
+
+no_underflow:
+ pta bad_sp, tr0
+ getcon kcr0, r6
+ movi THREAD_SIZE, r18
+ add r18, r6, r6
+ bgt SP, r6, tr0 ! sp above the stack
+
+ /* Make some room for the BASIC frame. */
+ movi -(FRAME_SIZE), r6
+ add SP, r6, SP
+
+/* Could do this with no stalling if we had another spare register, but the
+ code below will be OK. */
+ ld.q r5, SAVED_R2, r6
+ ld.q r5, SAVED_R3, r18
+ st.q SP, FRAME_R(2), r6
+ ld.q r5, SAVED_R4, r6
+ st.q SP, FRAME_R(3), r18
+ ld.q r5, SAVED_R5, r18
+ st.q SP, FRAME_R(4), r6
+ ld.q r5, SAVED_R6, r6
+ st.q SP, FRAME_R(5), r18
+ ld.q r5, SAVED_R18, r18
+ st.q SP, FRAME_R(6), r6
+ ld.q r5, SAVED_TR0, r6
+ st.q SP, FRAME_R(18), r18
+ st.q SP, FRAME_T(0), r6
+
+ /* Keep old SP around */
+ getcon KCR1, r6
+
+ /* Save the rest of the general purpose registers */
+ st.q SP, FRAME_R(0), r0
+ st.q SP, FRAME_R(1), r1
+ st.q SP, FRAME_R(7), r7
+ st.q SP, FRAME_R(8), r8
+ st.q SP, FRAME_R(9), r9
+ st.q SP, FRAME_R(10), r10
+ st.q SP, FRAME_R(11), r11
+ st.q SP, FRAME_R(12), r12
+ st.q SP, FRAME_R(13), r13
+ st.q SP, FRAME_R(14), r14
+
+ /* SP is somewhere else */
+ st.q SP, FRAME_R(15), r6
+
+ st.q SP, FRAME_R(16), r16
+ st.q SP, FRAME_R(17), r17
+ /* r18 is saved earlier. */
+ st.q SP, FRAME_R(19), r19
+ st.q SP, FRAME_R(20), r20
+ st.q SP, FRAME_R(21), r21
+ st.q SP, FRAME_R(22), r22
+ st.q SP, FRAME_R(23), r23
+ st.q SP, FRAME_R(24), r24
+ st.q SP, FRAME_R(25), r25
+ st.q SP, FRAME_R(26), r26
+ st.q SP, FRAME_R(27), r27
+ st.q SP, FRAME_R(28), r28
+ st.q SP, FRAME_R(29), r29
+ st.q SP, FRAME_R(30), r30
+ st.q SP, FRAME_R(31), r31
+ st.q SP, FRAME_R(32), r32
+ st.q SP, FRAME_R(33), r33
+ st.q SP, FRAME_R(34), r34
+ st.q SP, FRAME_R(35), r35
+ st.q SP, FRAME_R(36), r36
+ st.q SP, FRAME_R(37), r37
+ st.q SP, FRAME_R(38), r38
+ st.q SP, FRAME_R(39), r39
+ st.q SP, FRAME_R(40), r40
+ st.q SP, FRAME_R(41), r41
+ st.q SP, FRAME_R(42), r42
+ st.q SP, FRAME_R(43), r43
+ st.q SP, FRAME_R(44), r44
+ st.q SP, FRAME_R(45), r45
+ st.q SP, FRAME_R(46), r46
+ st.q SP, FRAME_R(47), r47
+ st.q SP, FRAME_R(48), r48
+ st.q SP, FRAME_R(49), r49
+ st.q SP, FRAME_R(50), r50
+ st.q SP, FRAME_R(51), r51
+ st.q SP, FRAME_R(52), r52
+ st.q SP, FRAME_R(53), r53
+ st.q SP, FRAME_R(54), r54
+ st.q SP, FRAME_R(55), r55
+ st.q SP, FRAME_R(56), r56
+ st.q SP, FRAME_R(57), r57
+ st.q SP, FRAME_R(58), r58
+ st.q SP, FRAME_R(59), r59
+ st.q SP, FRAME_R(60), r60
+ st.q SP, FRAME_R(61), r61
+ st.q SP, FRAME_R(62), r62
+
+ /*
+ * Save the S* registers.
+ */
+ getcon SSR, r61
+ st.q SP, FRAME_S(FSSR), r61
+ getcon SPC, r62
+ st.q SP, FRAME_S(FSPC), r62
+ movi -1, r62 /* Reset syscall_nr */
+ st.q SP, FRAME_S(FSYSCALL_ID), r62
+
+ /* Save the rest of the target registers */
+ gettr tr1, r6
+ st.q SP, FRAME_T(1), r6
+ gettr tr2, r6
+ st.q SP, FRAME_T(2), r6
+ gettr tr3, r6
+ st.q SP, FRAME_T(3), r6
+ gettr tr4, r6
+ st.q SP, FRAME_T(4), r6
+ gettr tr5, r6
+ st.q SP, FRAME_T(5), r6
+ gettr tr6, r6
+ st.q SP, FRAME_T(6), r6
+ gettr tr7, r6
+ st.q SP, FRAME_T(7), r6
+
+ ! setup FP so that unwinder can wind back through nested kernel mode
+ ! exceptions
+ add SP, ZERO, r14
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* We've pushed all the registers now, so only r2-r4 hold anything
+ * useful. Move them into callee save registers */
+ or r2, ZERO, r28
+ or r3, ZERO, r29
+ or r4, ZERO, r30
+
+ /* Preserve r2 as the event code */
+ movi evt_debug, r3
+ ori r3, 1, r3
+ ptabs r3, tr0
+
+ or SP, ZERO, r6
+ getcon TRA, r5
+ blink tr0, LINK
+
+ or r28, ZERO, r2
+ or r29, ZERO, r3
+ or r30, ZERO, r4
+#endif
+
+ /* For syscall and debug race condition, get TRA now */
+ getcon TRA, r5
+
+ /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
+ * Also set FD, to catch FPU usage in the kernel.
+ *
+ * benedict.gaster@superh.com 29/07/2002
+ *
+ * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
+ * same time change BL from 1->0, as any pending interrupt of a level
+ * higher than the previous value of IMASK will leak through and be
+ * taken unexpectedly.
+ *
+ * To avoid this we raise the IMASK and then issue another PUTCON to
+ * enable interrupts.
+ */
+ getcon SR, r6
+ movi SR_IMASK | SR_FD, r7
+ or r6, r7, r6
+ putcon r6, SR
+ movi SR_UNBLOCK_EXC, r7
+ and r6, r7, r6
+ putcon r6, SR
+
+
+ /* Now call the appropriate 3rd level handler */
+ or r3, ZERO, LINK
+ movi trap_jtable, r3
+ shlri r2, 3, r2
+ ldx.l r2, r3, r3
+ shlri r2, 2, r2
+ ptabs r3, tr0
+ or SP, ZERO, r3
+ blink tr0, ZERO
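
The shift sequence just above packs a small trick: EXPEVT/INTEVT codes step by 0x20 and trap_jtable holds 4-byte entries, so expevt >> 3 is directly the byte offset into the table, and a further >> 2 recovers the entry number handed to the handler in r2. A hedged C restatement (pick_handler and trap_jtable_sketch are illustrative names):

    typedef void (*trap_handler_t)(void);
    extern unsigned int trap_jtable_sketch[];	/* stand-in for trap_jtable */

    /* Sketch of the dispatch arithmetic performed above */
    static trap_handler_t pick_handler(unsigned long expevt, unsigned long *entry)
    {
    	unsigned long byte_off = expevt >> 3;	/* (expevt / 0x20) * sizeof(u32) */

    	*entry = byte_off >> 2;			/* entry number, ends up in r2 */
    	return (trap_handler_t)(unsigned long)trap_jtable_sketch[byte_off / 4];
    }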
+
+/*
+ * Second level handler for VBR-based exceptions. Post-handlers.
+ *
+ * Post-handlers for interrupts (ret_from_irq), exceptions
+ * (ret_from_exception) and common reentrance doors (restore_all
+ * to get back to the original context, ret_from_syscall loop to
+ * check kernel exiting).
+ *
+ * ret_with_reschedule and work_notifysig are inner labels of
+ * the ret_from_syscall loop.
+ *
+ * Common to all stack-frame-sensitive handlers.
+ *
+ * Inputs:
+ * (SP) struct pt_regs *, original register's frame pointer (basic)
+ *
+ */
+ .global ret_from_irq
+ret_from_irq:
+#ifdef CONFIG_POOR_MANS_STRACE
+ pta evt_debug_ret_from_irq, tr0
+ ori SP, 0, r2
+ blink tr0, LINK
+#endif
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+ STI()
+ pta ret_with_reschedule, tr0
+ blink tr0, ZERO /* Do not check softirqs */
+
+ .global ret_from_exception
+ret_from_exception:
+ preempt_stop()
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ pta evt_debug_ret_from_exc, tr0
+ ori SP, 0, r2
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+
+ /* Check softirqs */
+
+#ifdef CONFIG_PREEMPT
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+resume_kernel:
+ pta restore_all, tr0
+
+ getcon KCR0, r6
+ ld.l r6, TI_PRE_COUNT, r7
+	bne/u	r7, ZERO, tr0	/* preempt_count != 0: do not preempt */
+
+need_resched:
+ ld.l r6, TI_FLAGS, r7
+ movi (1 << TIF_NEED_RESCHED), r8
+ and r8, r7, r8
+	beq	r8, ZERO, tr0	/* no reschedule pending: restore */
+
+ getcon SR, r7
+ andi r7, 0xf0, r7
+ bne r7, ZERO, tr0
+
+ movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
+ shori (PREEMPT_ACTIVE & 65535), r8
+ st.l r6, TI_PRE_COUNT, r8
+
+ STI()
+ movi schedule, r7
+ ori r7, 1, r7
+ ptabs r7, tr1
+ blink tr1, LINK
+
+ st.l r6, TI_PRE_COUNT, ZERO
+ CLI()
+
+ pta need_resched, tr1
+ blink tr1, ZERO
+#endif
+
+ .global ret_from_syscall
+ret_from_syscall:
+
+ret_with_reschedule:
+ getcon KCR0, r6 ! r6 contains current_thread_info
+ ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
+
+ movi _TIF_NEED_RESCHED, r8
+ and r8, r7, r8
+ pta work_resched, tr0
+ bne r8, ZERO, tr0
+
+ pta restore_all, tr1
+
+ movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+ and r8, r7, r8
+ pta work_notifysig, tr0
+ bne r8, ZERO, tr0
+
+ blink tr1, ZERO
+
+work_resched:
+ pta ret_from_syscall, tr0
+ gettr tr0, LINK
+ movi schedule, r6
+ ptabs r6, tr0
+ blink tr0, ZERO /* Call schedule(), return on top */
+
+work_notifysig:
+ gettr tr1, LINK
+
+ movi do_signal, r6
+ ptabs r6, tr0
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3
+ blink tr0, LINK /* Call do_signal(regs, 0), return here */
+
+restore_all:
+ /* Do prefetches */
+
+ ld.q SP, FRAME_T(0), r6
+ ld.q SP, FRAME_T(1), r7
+ ld.q SP, FRAME_T(2), r8
+ ld.q SP, FRAME_T(3), r9
+ ptabs r6, tr0
+ ptabs r7, tr1
+ ptabs r8, tr2
+ ptabs r9, tr3
+ ld.q SP, FRAME_T(4), r6
+ ld.q SP, FRAME_T(5), r7
+ ld.q SP, FRAME_T(6), r8
+ ld.q SP, FRAME_T(7), r9
+ ptabs r6, tr4
+ ptabs r7, tr5
+ ptabs r8, tr6
+ ptabs r9, tr7
+
+ ld.q SP, FRAME_R(0), r0
+ ld.q SP, FRAME_R(1), r1
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+ ld.q SP, FRAME_R(8), r8
+ ld.q SP, FRAME_R(9), r9
+ ld.q SP, FRAME_R(10), r10
+ ld.q SP, FRAME_R(11), r11
+ ld.q SP, FRAME_R(12), r12
+ ld.q SP, FRAME_R(13), r13
+ ld.q SP, FRAME_R(14), r14
+
+ ld.q SP, FRAME_R(16), r16
+ ld.q SP, FRAME_R(17), r17
+ ld.q SP, FRAME_R(18), r18
+ ld.q SP, FRAME_R(19), r19
+ ld.q SP, FRAME_R(20), r20
+ ld.q SP, FRAME_R(21), r21
+ ld.q SP, FRAME_R(22), r22
+ ld.q SP, FRAME_R(23), r23
+ ld.q SP, FRAME_R(24), r24
+ ld.q SP, FRAME_R(25), r25
+ ld.q SP, FRAME_R(26), r26
+ ld.q SP, FRAME_R(27), r27
+ ld.q SP, FRAME_R(28), r28
+ ld.q SP, FRAME_R(29), r29
+ ld.q SP, FRAME_R(30), r30
+ ld.q SP, FRAME_R(31), r31
+ ld.q SP, FRAME_R(32), r32
+ ld.q SP, FRAME_R(33), r33
+ ld.q SP, FRAME_R(34), r34
+ ld.q SP, FRAME_R(35), r35
+ ld.q SP, FRAME_R(36), r36
+ ld.q SP, FRAME_R(37), r37
+ ld.q SP, FRAME_R(38), r38
+ ld.q SP, FRAME_R(39), r39
+ ld.q SP, FRAME_R(40), r40
+ ld.q SP, FRAME_R(41), r41
+ ld.q SP, FRAME_R(42), r42
+ ld.q SP, FRAME_R(43), r43
+ ld.q SP, FRAME_R(44), r44
+ ld.q SP, FRAME_R(45), r45
+ ld.q SP, FRAME_R(46), r46
+ ld.q SP, FRAME_R(47), r47
+ ld.q SP, FRAME_R(48), r48
+ ld.q SP, FRAME_R(49), r49
+ ld.q SP, FRAME_R(50), r50
+ ld.q SP, FRAME_R(51), r51
+ ld.q SP, FRAME_R(52), r52
+ ld.q SP, FRAME_R(53), r53
+ ld.q SP, FRAME_R(54), r54
+ ld.q SP, FRAME_R(55), r55
+ ld.q SP, FRAME_R(56), r56
+ ld.q SP, FRAME_R(57), r57
+ ld.q SP, FRAME_R(58), r58
+
+ getcon SR, r59
+ movi SR_BLOCK_EXC, r60
+ or r59, r60, r59
+ putcon r59, SR /* SR.BL = 1, keep nesting out */
+ ld.q SP, FRAME_S(FSSR), r61
+ ld.q SP, FRAME_S(FSPC), r62
+ movi SR_ASID_MASK, r60
+ and r59, r60, r59
+ andc r61, r60, r61 /* Clear out older ASID */
+ or r59, r61, r61 /* Retain current ASID */
+ putcon r61, SSR
+ putcon r62, SPC
+
+ /* Ignore FSYSCALL_ID */
+
+ ld.q SP, FRAME_R(59), r59
+ ld.q SP, FRAME_R(60), r60
+ ld.q SP, FRAME_R(61), r61
+ ld.q SP, FRAME_R(62), r62
+
+ /* Last touch */
+ ld.q SP, FRAME_R(15), SP
+ rte
+ nop
+
+/*
+ * Third level handlers for VBR-based exceptions. Adapting args to
+ * and/or deflecting to fourth level handlers.
+ *
+ * Fourth level handlers interface.
+ * Most are C-coded handlers directly pointed by the trap_jtable.
+ * (Third = Fourth level)
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
+ * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
+ * (r3) struct pt_regs *, original register's frame pointer
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
+ * (r5) TRA control register (for syscall/debug benefit only)
+ * (LINK) return address
+ * (SP) = r3
+ *
+ * Kernel TLB fault handlers will get a slightly different interface.
+ * (r2) struct pt_regs *, original register's frame pointer
+ * (r3) writeaccess, whether it's a store fault as opposed to load fault
+ * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
+ * (r5) Effective Address of fault
+ * (LINK) return address
+ * (SP) = r2
+ *
+ * fpu_error_or_IRQ? is a helper to deflect to the right cause.
+ *
+ */
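
In C terms, the two conventions documented above correspond to prototypes of roughly the following shape. These are hypothetical declarations written only to mirror the register lists, not the kernel's actual ones.

    #include <linux/linkage.h>

    struct pt_regs;

    /* Hypothetical: normal third/fourth level handler, args in r2..r5 */
    asmlinkage void third_level_handler(unsigned long code,		/* r2 */
    				    struct pt_regs *regs,	/* r3 == SP */
    				    unsigned long event,	/* r4 */
    				    unsigned long tra);		/* r5 */

    /* Hypothetical: kernel TLB fault flavour, args in r2..r5 */
    asmlinkage void kernel_tlb_fault_handler(struct pt_regs *regs,	    /* r2 == SP */
    					 unsigned long writeaccess, /* r3 */
    					 unsigned long execaccess,  /* r4 */
    					 unsigned long address);    /* r5 */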
+tlb_miss_load:
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+tlb_miss_store:
+ or SP, ZERO, r2
+ movi 1, r3 /* Write */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+itlb_miss_or_IRQ:
+ pta its_IRQ, tr0
+ beqi/u r4, EVENT_INTERRUPT, tr0
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ movi 1, r4 /* Text */
+ getcon TEA, r5
+ /* Fall through */
+
+call_do_page_fault:
+ movi do_page_fault, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+fpu_error_or_IRQA:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi do_fpu_state_restore, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+fpu_error_or_IRQB:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi do_fpu_state_restore, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+its_IRQ:
+ movi do_IRQ, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+/*
+ * system_call/unknown_trap third level handler:
+ *
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (TRAP = 11)
+ * (r3) struct pt_regs *, original register's frame pointer
+ * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
+ * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
+ * (SP) = r3
+ * (LINK) return address: ret_from_exception
+ * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
+ *
+ * Outputs:
+ * (*r3) Syscall reply (Saved r2)
+ * (LINK) In case of syscall only it can be scrapped.
+ * Common second level post handler will be ret_from_syscall.
+ * Common (non-trace) exit point to that is syscall_ret (saving
+ * result to r2). Common bad exit point is syscall_bad (returning
+ * ENOSYS then saved to r2).
+ *
+ */
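
The TRA checks that system_call performs below reduce to this C sketch (field layout 0x00xyzzzz as documented above; the helper names are illustrative):

    /* Sketch of the TRA decode: x nibble selects syscall, low bits carry nr */
    static inline int tra_is_syscall(unsigned long tra)
    {
    	return (tra >> 20) == 1;	/* matches the bnei r4, 1 test below */
    }

    static inline unsigned int tra_syscall_nr(unsigned long tra)
    {
    	return tra & 0x1ff;		/* matches the andi r5, 0x1ff below */
    }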
+
+unknown_trap:
+ /* Unknown Trap or User Trace */
+ movi do_unknown_trapa, r6
+ ptabs r6, tr0
+ ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
+ andi r2, 0x1ff, r2 /* r2 = syscall # */
+ blink tr0, LINK
+
+ pta syscall_ret, tr0
+ blink tr0, ZERO
+
+	/* New syscall implementation */
+system_call:
+ pta unknown_trap, tr0
+ or r5, ZERO, r4 /* TRA (=r5) -> r4 */
+ shlri r4, 20, r4
+ bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
+
+ /* It's a system call */
+ st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
+ andi r5, 0x1ff, r5 /* syscall # -> r5 */
+
+ STI()
+
+ pta syscall_allowed, tr0
+ movi NR_syscalls - 1, r4 /* Last valid */
+ bgeu/l r4, r5, tr0
+
+syscall_bad:
+ /* Return ENOSYS ! */
+ movi -(ENOSYS), r2 /* Fall-through */
+
+ .global syscall_ret
+syscall_ret:
+ st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* nothing useful in registers at this point */
+
+ movi evt_debug2, r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ ld.q SP, FRAME_R(9), r2
+ or SP, ZERO, r3
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+
+/* A different return path for ret_from_fork, because we now need
+ * to call schedule_tail with later kernels. Since prev is loaded
+ * into r2 by switch_to(), we can call it straight away.
+ */
+
+.global ret_from_fork
+ret_from_fork:
+
+ movi schedule_tail,r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ blink tr0, LINK
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* nothing useful in registers at this point */
+
+ movi evt_debug2, r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ ld.q SP, FRAME_R(9), r2
+ or SP, ZERO, r3
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+
+
+syscall_allowed:
+ /* Use LINK to deflect the exit point, default is syscall_ret */
+ pta syscall_ret, tr0
+ gettr tr0, LINK
+ pta syscall_notrace, tr0
+
+ getcon KCR0, r2
+ ld.l r2, TI_FLAGS, r4
+ movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
+ and r6, r4, r6
+ beq/l r6, ZERO, tr0
+
+ /* Trace it by calling syscall_trace before and after */
+ movi syscall_trace, r4
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3
+ ptabs r4, tr0
+ blink tr0, LINK
+
+ /* Reload syscall number as r5 is trashed by syscall_trace */
+ ld.q SP, FRAME_S(FSYSCALL_ID), r5
+ andi r5, 0x1ff, r5
+
+ pta syscall_ret_trace, tr0
+ gettr tr0, LINK
+
+syscall_notrace:
+ /* Now point to the appropriate 4th level syscall handler */
+ movi sys_call_table, r4
+ shlli r5, 2, r5
+ ldx.l r4, r5, r5
+ ptabs r5, tr0
+
+ /* Prepare original args */
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+
+ /* And now the trick for those syscalls requiring regs * ! */
+ or SP, ZERO, r8
+
+ /* Call it */
+ blink tr0, ZERO /* LINK is already properly set */
+
+syscall_ret_trace:
+ /* We get back here only if under trace */
+ st.q SP, FRAME_R(9), r2 /* Save return value */
+
+ movi syscall_trace, LINK
+ or SP, ZERO, r2
+ movi 1, r3
+ ptabs LINK, tr0
+ blink tr0, LINK
+
+ /* This needs to be done after any syscall tracing */
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO /* Resume normal return sequence */
+
+/*
+ * --- Switch to running under a particular ASID and return the previous ASID value
+ * --- The caller is assumed to have done a cli before calling this.
+ *
+ * Input r2 : new ASID
+ * Output r2 : old ASID
+ */
+
+ .global switch_and_save_asid
+switch_and_save_asid:
+ getcon sr, r0
+ movi 255, r4
+ shlli r4, 16, r4 /* r4 = mask to select ASID */
+ and r0, r4, r3 /* r3 = shifted old ASID */
+ andi r2, 255, r2 /* mask down new ASID */
+ shlli r2, 16, r2 /* align new ASID against SR.ASID */
+ andc r0, r4, r0 /* efface old ASID from SR */
+ or r0, r2, r0 /* insert the new ASID */
+ putcon r0, ssr
+ movi 1f, r0
+ putcon r0, spc
+ rte
+ nop
+1:
+ ptabs LINK, tr0
+ shlri r3, 16, r2 /* r2 = old ASID */
+ blink tr0, r63
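
The register surgery in switch_and_save_asid above is a plain field swap on SR.ASID (bits 23:16, per SR_ASID_MASK); a hedged C rendering, with an illustrative name:

    /* Sketch: compute the new SR value and recover the old ASID */
    static inline unsigned long long
    switch_asid_sketch(unsigned long long sr, unsigned long new_asid,
    		   unsigned long *old_asid)
    {
    	*old_asid = (sr & 0x00ff0000ULL) >> 16;
    	return (sr & ~0x00ff0000ULL) | ((new_asid & 0xff) << 16);
    }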
+
+ .global route_to_panic_handler
+route_to_panic_handler:
+ /* Switch to real mode, goto panic_handler, don't return. Useful for
+ last-chance debugging, e.g. if no output wants to go to the console.
+ */
+
+ movi panic_handler - CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ pta 1f, tr1
+ gettr tr1, r0
+ putcon r0, spc
+ getcon sr, r0
+ movi 1, r1
+ shlli r1, 31, r1
+ andc r0, r1, r0
+ putcon r0, ssr
+ rte
+ nop
+1: /* Now in real mode */
+ blink tr0, r63
+ nop
+
+ .global peek_real_address_q
+peek_real_address_q:
+ /* Two args:
+ r2 : real mode address to peek
+ r2(out) : result quadword
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to onchip_remap the debug
+ module, and to avoid the need to onchip_remap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+ or r0, r1, r1 /* r0 with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.peek0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+	/* Here's the actual peek. If the address is bad, all bets are off as
+	 * to what will happen (handlers invoked in real mode = bad news) */
+ ld.q r2, 0, r2
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
+
+ .global poke_real_address_q
+poke_real_address_q:
+ /* Two args:
+ r2 : real mode address to poke
+ r3 : quadword value to write.
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to onchip_remap the debug
+ module, and to avoid the need to onchip_remap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+ or r0, r1, r1 /* r0 with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.poke0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+	/* Here's the actual poke. If the address is bad, all bets are off as
+	 * to what will happen (handlers invoked in real mode = bad news) */
+ st.q r2, 0, r3
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
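
Both helpers follow the documented two-register contract; from C they would be declared and used roughly as below. The prototypes are inferred from the comments above, and DEV_REG_PHYS is a made-up address for illustration.

    /* Prototypes inferred from the register contracts documented above */
    extern unsigned long long peek_real_address_q(unsigned long long paddr);
    extern void poke_real_address_q(unsigned long long paddr,
    				unsigned long long val);

    #define DEV_REG_PHYS 0x09000000ULL	/* illustrative physical address */

    /* Example: read-modify-write a device register by physical address */
    static void set_device_bit_sketch(void)
    {
    	unsigned long long v = peek_real_address_q(DEV_REG_PHYS);

    	poke_real_address_q(DEV_REG_PHYS, v | 1);
    }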
+
+/*
+ * --- User Access Handling Section
+ */
+
+/*
+ * User Access support. It has all moved to non-inlined assembler
+ * functions here.
+ *
+ * __kernel_size_t __copy_user(void *__to, const void *__from,
+ * __kernel_size_t __n)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) size in bytes
+ *
+ * Outputs:
+ * (*r2) target data
+ * (r2) non-copied bytes
+ *
+ * If a fault occurs on the user pointer, bail out early and return the
+ * number of bytes not copied in r2.
+ * Strategy : for large blocks, call a real memcpy function which can
+ * move >1 byte at a time using unaligned ld/st instructions, and can
+ * manipulate the cache using prefetch + alloco to improve the speed
+ * further. If a fault occurs in that function, just revert to the
+ * byte-by-byte approach used for small blocks; this is rare so the
+ * performance hit for that case does not matter.
+ *
+ * For small blocks it's not worth the overhead of setting up and calling
+ * the memcpy routine; do the copy a byte at a time.
+ *
+ */
+ .global __copy_user
+__copy_user:
+ pta __copy_user_byte_by_byte, tr1
+ movi 16, r0 ! this value is a best guess, should tune it by benchmarking
+ bge/u r0, r4, tr1
+ pta copy_user_memcpy, tr0
+ addi SP, -32, SP
+ /* Save arguments in case we have to fix-up unhandled page fault */
+ st.q SP, 0, r2
+ st.q SP, 8, r3
+ st.q SP, 16, r4
+ st.q SP, 24, r35 ! r35 is callee-save
+ /* Save LINK in a register to reduce RTS time later (otherwise
+ ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
+ ori LINK, 0, r35
+ blink tr0, LINK
+
+ /* Copy completed normally if we get back here */
+ ptabs r35, tr0
+ ld.q SP, 24, r35
+ /* don't restore r2-r4, pointless */
+ /* set result=r2 to zero as the copy must have succeeded. */
+ or r63, r63, r2
+ addi SP, 32, SP
+ blink tr0, r63 ! RTS
+
+ .global __copy_user_fixup
+__copy_user_fixup:
+ /* Restore stack frame */
+ ori r35, 0, LINK
+ ld.q SP, 24, r35
+ ld.q SP, 16, r4
+ ld.q SP, 8, r3
+ ld.q SP, 0, r2
+ addi SP, 32, SP
+ /* Fall through to original code, in the 'same' state we entered with */
+
+/* The slow byte-by-byte method is used if the fast copy traps due to a bad
+ user address. In that rare case, the speed drop can be tolerated. */
+__copy_user_byte_by_byte:
+ pta ___copy_user_exit, tr1
+ pta ___copy_user1, tr0
+ beq/u r4, r63, tr1 /* early exit for zero length copy */
+ sub r2, r3, r0
+ addi r0, -1, r0
+
+___copy_user1:
+ ld.b r3, 0, r5 /* Fault address 1 */
+
+ /* Could rewrite this to use just 1 add, but the second comes 'free'
+ due to load latency */
+ addi r3, 1, r3
+ addi r4, -1, r4 /* No real fixup required */
+___copy_user2:
+ stx.b r3, r0, r5 /* Fault address 2 */
+ bne r4, ZERO, tr0
+
+___copy_user_exit:
+ or r4, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) size in bytes
+ *
+ * Outputs:
+ * (*r2) zero-ed target data
+ * (r2) non-zero-ed bytes
+ */
+ .global __clear_user
+__clear_user:
+ pta ___clear_user_exit, tr1
+ pta ___clear_user1, tr0
+ beq/u r3, r63, tr1
+
+___clear_user1:
+ st.b r2, 0, ZERO /* Fault address */
+ addi r2, 1, r2
+ addi r3, -1, r3 /* No real fixup required */
+ bne r3, ZERO, tr0
+
+___clear_user_exit:
+ or r3, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
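+/* Caller-side sketch (illustrative): per the comment above, wrappers
+ * treat the return value as the residue, e.g.
+ *
+ *	left = __clear_user(buf, len);	// 0 on success, otherwise the
+ *					// number of bytes left un-zeroed
+ */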
+
+/*
+ * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
+ * int __count)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) maximum size in bytes
+ *
+ * Outputs:
+ * (*r2) copied data
+ * (r2) -EFAULT (in case of faulting)
+ * copied data (otherwise)
+ */
+ .global __strncpy_from_user
+__strncpy_from_user:
+ pta ___strncpy_from_user1, tr0
+ pta ___strncpy_from_user_done, tr1
+ or r4, ZERO, r5 /* r5 = original count */
+ beq/u r4, r63, tr1 /* early exit if r4==0 */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+
+___strncpy_from_user1:
+ ld.b r3, 0, r7 /* Fault address: only in reading */
+ st.b r2, 0, r7
+ addi r2, 1, r2
+ addi r3, 1, r3
+ beq/u ZERO, r7, tr1
+ addi r4, -1, r4 /* return real number of copied bytes */
+ bne/l ZERO, r4, tr0
+
+___strncpy_from_user_done:
+ sub r5, r4, r6 /* If done, return copied */
+
+___strncpy_from_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
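+
+/* Return-convention sketch (illustrative): a '\0' in the source stops
+ * the copy early, and the returned count excludes that terminator:
+ *
+ *	long n = __strncpy_from_user(dst, usrc, count);
+ *	if (n < 0)
+ *		return n;	// faulted: -EFAULT
+ *	// otherwise n = number of bytes copied, at most count
+ */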
+
+/*
+ * extern long __strnlen_user(const char *__s, long __n)
+ *
+ * Inputs:
+ * (r2) source address
+ * (r3) source size in bytes
+ *
+ * Outputs:
+ * (r2) -EFAULT (in case of faulting)
+ * string length (otherwise)
+ */
+ .global __strnlen_user
+__strnlen_user:
+ pta ___strnlen_user_set_reply, tr0
+ pta ___strnlen_user1, tr1
+ or ZERO, ZERO, r5 /* r5 = counter */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+ beq r3, ZERO, tr0
+
+___strnlen_user1:
+ ldx.b r2, r5, r7 /* Fault address: only in reading */
+ addi r3, -1, r3 /* No real fixup */
+ addi r5, 1, r5
+ beq r3, ZERO, tr0
+ bne r7, ZERO, tr1
+! The line below used to be active.  It led to a junk byte lying between each pair
+! of entries in the argv & envp structures in memory. Whilst the program saw the right data
+! via the argv and envp arguments to main, it meant the 'flat' representation visible through
+! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
+! addi r5, 1, r5 /* Include '\0' */
+
+___strnlen_user_set_reply:
+ or r5, ZERO, r6 /* If done, return counter */
+
+___strnlen_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __get_user_asm_?(void *val, long addr)
+ *
+ * Inputs:
+ * (r2) dest address
+ * (r3) source address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __get_user_asm_b
+__get_user_asm_b:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_b1:
+ ld.b r3, 0, r5 /* r5 = data */
+ st.b r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_w
+__get_user_asm_w:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_w1:
+ ld.w r3, 0, r5 /* r5 = data */
+ st.w r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_l
+__get_user_asm_l:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_l1:
+ ld.l r3, 0, r5 /* r5 = data */
+ st.l r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_q
+__get_user_asm_q:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_q1:
+ ld.q r3, 0, r5 /* r5 = data */
+ st.q r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __put_user_asm_?(void *pval, long addr)
+ *
+ * Inputs:
+ * (r2) kernel pointer to value
+ * (r3) dest address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __put_user_asm_b
+__put_user_asm_b:
+ ld.b r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_b1:
+ st.b r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_w
+__put_user_asm_w:
+ ld.w r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_w1:
+ st.w r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_l
+__put_user_asm_l:
+ ld.l r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_l1:
+ st.l r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_q
+__put_user_asm_q:
+ ld.q r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_q1:
+ st.q r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
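+
+/* Dispatch sketch (illustrative, not part of the patch proper): the
+ * uaccess wrappers are expected to select a variant by access size,
+ * roughly
+ *
+ *	switch (size) {
+ *	case 1: retval = __get_user_asm_b(&x, addr); break;
+ *	case 2: retval = __get_user_asm_w(&x, addr); break;
+ *	case 4: retval = __get_user_asm_l(&x, addr); break;
+ *	case 8: retval = __get_user_asm_q(&x, addr); break;
+ *	}
+ *
+ * with each variant returning 0 or -EFAULT as documented above; the
+ * __put_user_asm_* family is dispatched the same way.
+ */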
+
+panic_stash_regs:
+	/* The idea is: when we get an unhandled panic, we dump the registers
+	   to a known memory location, then just sit in a tight loop.
+ This allows the human to look at the memory region through the GDB
+ session (assuming the debug module's SHwy initiator isn't locked up
+ or anything), to hopefully analyze the cause of the panic. */
+
+ /* On entry, former r15 (SP) is in DCR
+ former r0 is at resvec_saved_area + 0
+ former r1 is at resvec_saved_area + 8
+ former tr0 is at resvec_saved_area + 32
+ DCR is the only register whose value is lost altogether.
+ */
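+
+	/* Resulting dump layout (as laid out by the stores below), for
+	   reading back through the debugger:
+		0x000-0x1f8 : r0..r63
+		0x200-0x238 : tr0..tr7
+		0x240-0x2a8 : sr, ssr, pssr, spc, pspc, intevt, expevt,
+			      pexpevt, tra, tea, kcr0, kcr1, vbr, resvec */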
+
+ movi 0xffffffff80000000, r0 ! phy of dump area
+ ld.q SP, 0x000, r1 ! former r0
+ st.q r0, 0x000, r1
+ ld.q SP, 0x008, r1 ! former r1
+ st.q r0, 0x008, r1
+ st.q r0, 0x010, r2
+ st.q r0, 0x018, r3
+ st.q r0, 0x020, r4
+ st.q r0, 0x028, r5
+ st.q r0, 0x030, r6
+ st.q r0, 0x038, r7
+ st.q r0, 0x040, r8
+ st.q r0, 0x048, r9
+ st.q r0, 0x050, r10
+ st.q r0, 0x058, r11
+ st.q r0, 0x060, r12
+ st.q r0, 0x068, r13
+ st.q r0, 0x070, r14
+ getcon dcr, r14
+ st.q r0, 0x078, r14
+ st.q r0, 0x080, r16
+ st.q r0, 0x088, r17
+ st.q r0, 0x090, r18
+ st.q r0, 0x098, r19
+ st.q r0, 0x0a0, r20
+ st.q r0, 0x0a8, r21
+ st.q r0, 0x0b0, r22
+ st.q r0, 0x0b8, r23
+ st.q r0, 0x0c0, r24
+ st.q r0, 0x0c8, r25
+ st.q r0, 0x0d0, r26
+ st.q r0, 0x0d8, r27
+ st.q r0, 0x0e0, r28
+ st.q r0, 0x0e8, r29
+ st.q r0, 0x0f0, r30
+ st.q r0, 0x0f8, r31
+ st.q r0, 0x100, r32
+ st.q r0, 0x108, r33
+ st.q r0, 0x110, r34
+ st.q r0, 0x118, r35
+ st.q r0, 0x120, r36
+ st.q r0, 0x128, r37
+ st.q r0, 0x130, r38
+ st.q r0, 0x138, r39
+ st.q r0, 0x140, r40
+ st.q r0, 0x148, r41
+ st.q r0, 0x150, r42
+ st.q r0, 0x158, r43
+ st.q r0, 0x160, r44
+ st.q r0, 0x168, r45
+ st.q r0, 0x170, r46
+ st.q r0, 0x178, r47
+ st.q r0, 0x180, r48
+ st.q r0, 0x188, r49
+ st.q r0, 0x190, r50
+ st.q r0, 0x198, r51
+ st.q r0, 0x1a0, r52
+ st.q r0, 0x1a8, r53
+ st.q r0, 0x1b0, r54
+ st.q r0, 0x1b8, r55
+ st.q r0, 0x1c0, r56
+ st.q r0, 0x1c8, r57
+ st.q r0, 0x1d0, r58
+ st.q r0, 0x1d8, r59
+ st.q r0, 0x1e0, r60
+ st.q r0, 0x1e8, r61
+ st.q r0, 0x1f0, r62
+ st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
+
+ ld.q SP, 0x020, r1 ! former tr0
+ st.q r0, 0x200, r1
+ gettr tr1, r1
+ st.q r0, 0x208, r1
+ gettr tr2, r1
+ st.q r0, 0x210, r1
+ gettr tr3, r1
+ st.q r0, 0x218, r1
+ gettr tr4, r1
+ st.q r0, 0x220, r1
+ gettr tr5, r1
+ st.q r0, 0x228, r1
+ gettr tr6, r1
+ st.q r0, 0x230, r1
+ gettr tr7, r1
+ st.q r0, 0x238, r1
+
+ getcon sr, r1
+ getcon ssr, r2
+ getcon pssr, r3
+ getcon spc, r4
+ getcon pspc, r5
+ getcon intevt, r6
+ getcon expevt, r7
+ getcon pexpevt, r8
+ getcon tra, r9
+ getcon tea, r10
+ getcon kcr0, r11
+ getcon kcr1, r12
+ getcon vbr, r13
+ getcon resvec, r14
+
+ st.q r0, 0x240, r1
+ st.q r0, 0x248, r2
+ st.q r0, 0x250, r3
+ st.q r0, 0x258, r4
+ st.q r0, 0x260, r5
+ st.q r0, 0x268, r6
+ st.q r0, 0x270, r7
+ st.q r0, 0x278, r8
+ st.q r0, 0x280, r9
+ st.q r0, 0x288, r10
+ st.q r0, 0x290, r11
+ st.q r0, 0x298, r12
+ st.q r0, 0x2a0, r13
+ st.q r0, 0x2a8, r14
+
+ getcon SPC,r2
+ getcon SSR,r3
+ getcon EXPEVT,r4
+ /* Prepare to jump to C - physical address */
+ movi panic_handler-CONFIG_PAGE_OFFSET, r1
+ ori r1, 1, r1
+ ptabs r1, tr0
+ getcon DCR, SP
+ blink tr0, ZERO
+ nop
+ nop
+ nop
+ nop
+
+
+
+
+/*
+ * --- Signal Handling Section
+ */
+
+/*
+ * extern long long _sa_default_rt_restorer
+ * extern long long _sa_default_restorer
+ *
+ * or, better,
+ *
+ * extern void _sa_default_rt_restorer(void)
+ * extern void _sa_default_restorer(void)
+ *
+ * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
+ * from user space. Copied into user space by signal management.
+ * Both must be quad aligned and 2 quad long (4 instructions).
+ *
+ */
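+/* Instruction-level note: 'movi 0x10, r9' followed by 'shori __NR_xxx, r9'
+ * leaves r9 = (0x10 << 16) | __NR_xxx, i.e. the syscall number tagged in
+ * the low bits, which the trapa-based syscall path then dispatches on.
+ */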
+ .balign 8
+ .global sa_default_rt_restorer
+sa_default_rt_restorer:
+ movi 0x10, r9
+ shori __NR_rt_sigreturn, r9
+ trapa r9
+ nop
+
+ .balign 8
+ .global sa_default_restorer
+sa_default_restorer:
+ movi 0x10, r9
+ shori __NR_sigreturn, r9
+ trapa r9
+ nop
+
+/*
+ * --- __ex_table Section
+ */
+
+/*
+ * User Access Exception Table.
+ */
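+/*
+ * Each entry below pairs a potentially-faulting user access with its
+ * fixup label: on a page fault at the first address, the fault handler
+ * searches this table and resumes execution at the second, letting the
+ * routines above return -EFAULT or a residual count instead of oopsing.
+ */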
+ .section __ex_table, "a"
+
+ .global asm_uaccess_start /* Just a marker */
+asm_uaccess_start:
+
+ .long ___copy_user1, ___copy_user_exit
+ .long ___copy_user2, ___copy_user_exit
+ .long ___clear_user1, ___clear_user_exit
+ .long ___strncpy_from_user1, ___strncpy_from_user_exit
+ .long ___strnlen_user1, ___strnlen_user_exit
+ .long ___get_user_asm_b1, ___get_user_asm_b_exit
+ .long ___get_user_asm_w1, ___get_user_asm_w_exit
+ .long ___get_user_asm_l1, ___get_user_asm_l_exit
+ .long ___get_user_asm_q1, ___get_user_asm_q_exit
+ .long ___put_user_asm_b1, ___put_user_asm_b_exit
+ .long ___put_user_asm_w1, ___put_user_asm_w_exit
+ .long ___put_user_asm_l1, ___put_user_asm_l_exit
+ .long ___put_user_asm_q1, ___put_user_asm_q_exit
+
+ .global asm_uaccess_end /* Just a marker */
+asm_uaccess_end:
+
+
+
+
+/*
+ * --- .text.init Section
+ */
+
+ .section .text.init, "ax"
+
+/*
+ * void trap_init (void)
+ *
+ */
+ .global trap_init
+trap_init:
+ addi SP, -24, SP /* Room to save r28/r29/r30 */
+ st.q SP, 0, r28
+ st.q SP, 8, r29
+ st.q SP, 16, r30
+
+ /* Set VBR and RESVEC */
+ movi LVBR_block, r19
+ andi r19, -4, r19 /* reset MMUOFF + reserved */
+ /* For RESVEC exceptions we force the MMU off, which means we need the
+ physical address. */
+ movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
+ andi r20, -4, r20 /* reset reserved */
+ ori r20, 1, r20 /* set MMUOFF */
+ putcon r19, VBR
+ putcon r20, RESVEC
+
+ /* Sanity check */
+ movi LVBR_block_end, r21
+ andi r21, -4, r21
+ movi BLOCK_SIZE, r29 /* r29 = expected size */
+ or r19, ZERO, r30
+ add r19, r29, r19
+
+ /*
+	 * Ugly, but better to loop forever now than crash afterwards.
+ * We should print a message, but if we touch LVBR or
+ * LRESVEC blocks we should not be surprised if we get stuck
+ * in trap_init().
+ */
+ pta trap_init_loop, tr1
+ gettr tr1, r28 /* r28 = trap_init_loop */
+ sub r21, r30, r30 /* r30 = actual size */
+
+ /*
+ * VBR/RESVEC handlers overlap by being bigger than
+ * allowed. Very bad. Just loop forever.
+ * (r28) panic/loop address
+ * (r29) expected size
+ * (r30) actual size
+ */
+trap_init_loop:
+ bne r19, r21, tr1
+
+	/* Now that exception vectors are set up, reset SR.BL */
+ getcon SR, r22
+ movi SR_UNBLOCK_EXC, r23
+ and r22, r23, r22
+ putcon r22, SR
+
+ addi SP, 24, SP
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
new file mode 100644
index 00000000000..30b76a94abf
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -0,0 +1,166 @@
+/*
+ * arch/sh/kernel/cpu/sh5/fpu.c
+ *
+ * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
+ * Copyright (C) 2002 STMicroelectronics Limited
+ * Author : Stuart Menefy
+ *
+ * Started from SH4 version:
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/user.h>
+#include <asm/io.h>
+
+/*
+ * Initially load the FPU with signalling NaNs.  This bit pattern
+ * has the property that no matter whether considered as single or as
+ * double precision, it still represents a signalling NaN.
+ */
+#define sNAN64 0xFFFFFFFFFFFFFFFFULL
+#define sNAN32 0xFFFFFFFFUL
+
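+/*
+ * (Both patterns have an all-ones exponent and a non-zero fraction, so
+ * they decode as NaNs at either width; treating this encoding as
+ * signalling rather than quiet is the SH-5 FPU's convention.)
+ */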
+static union sh_fpu_union init_fpuregs = {
+ .hard = {
+ .fp_regs = { [0 ... 63] = sNAN32 },
+ .fpscr = FPSCR_INIT
+ }
+};
+
+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+{
+ asm volatile("fst.p %0, (0*8), fp0\n\t"
+ "fst.p %0, (1*8), fp2\n\t"
+ "fst.p %0, (2*8), fp4\n\t"
+ "fst.p %0, (3*8), fp6\n\t"
+ "fst.p %0, (4*8), fp8\n\t"
+ "fst.p %0, (5*8), fp10\n\t"
+ "fst.p %0, (6*8), fp12\n\t"
+ "fst.p %0, (7*8), fp14\n\t"
+ "fst.p %0, (8*8), fp16\n\t"
+ "fst.p %0, (9*8), fp18\n\t"
+ "fst.p %0, (10*8), fp20\n\t"
+ "fst.p %0, (11*8), fp22\n\t"
+ "fst.p %0, (12*8), fp24\n\t"
+ "fst.p %0, (13*8), fp26\n\t"
+ "fst.p %0, (14*8), fp28\n\t"
+ "fst.p %0, (15*8), fp30\n\t"
+ "fst.p %0, (16*8), fp32\n\t"
+ "fst.p %0, (17*8), fp34\n\t"
+ "fst.p %0, (18*8), fp36\n\t"
+ "fst.p %0, (19*8), fp38\n\t"
+ "fst.p %0, (20*8), fp40\n\t"
+ "fst.p %0, (21*8), fp42\n\t"
+ "fst.p %0, (22*8), fp44\n\t"
+ "fst.p %0, (23*8), fp46\n\t"
+ "fst.p %0, (24*8), fp48\n\t"
+ "fst.p %0, (25*8), fp50\n\t"
+ "fst.p %0, (26*8), fp52\n\t"
+ "fst.p %0, (27*8), fp54\n\t"
+ "fst.p %0, (28*8), fp56\n\t"
+ "fst.p %0, (29*8), fp58\n\t"
+ "fst.p %0, (30*8), fp60\n\t"
+ "fst.p %0, (31*8), fp62\n\t"
+
+ "fgetscr fr63\n\t"
+ "fst.s %0, (32*8), fr63\n\t"
+ : /* no output */
+ : "r" (&tsk->thread.fpu.hard)
+ : "memory");
+}
+
+static inline void
+fpload(struct sh_fpu_hard_struct *fpregs)
+{
+ asm volatile("fld.p %0, (0*8), fp0\n\t"
+ "fld.p %0, (1*8), fp2\n\t"
+ "fld.p %0, (2*8), fp4\n\t"
+ "fld.p %0, (3*8), fp6\n\t"
+ "fld.p %0, (4*8), fp8\n\t"
+ "fld.p %0, (5*8), fp10\n\t"
+ "fld.p %0, (6*8), fp12\n\t"
+ "fld.p %0, (7*8), fp14\n\t"
+ "fld.p %0, (8*8), fp16\n\t"
+ "fld.p %0, (9*8), fp18\n\t"
+ "fld.p %0, (10*8), fp20\n\t"
+ "fld.p %0, (11*8), fp22\n\t"
+ "fld.p %0, (12*8), fp24\n\t"
+ "fld.p %0, (13*8), fp26\n\t"
+ "fld.p %0, (14*8), fp28\n\t"
+ "fld.p %0, (15*8), fp30\n\t"
+ "fld.p %0, (16*8), fp32\n\t"
+ "fld.p %0, (17*8), fp34\n\t"
+ "fld.p %0, (18*8), fp36\n\t"
+ "fld.p %0, (19*8), fp38\n\t"
+ "fld.p %0, (20*8), fp40\n\t"
+ "fld.p %0, (21*8), fp42\n\t"
+ "fld.p %0, (22*8), fp44\n\t"
+ "fld.p %0, (23*8), fp46\n\t"
+ "fld.p %0, (24*8), fp48\n\t"
+ "fld.p %0, (25*8), fp50\n\t"
+ "fld.p %0, (26*8), fp52\n\t"
+ "fld.p %0, (27*8), fp54\n\t"
+ "fld.p %0, (28*8), fp56\n\t"
+ "fld.p %0, (29*8), fp58\n\t"
+ "fld.p %0, (30*8), fp60\n\t"
+
+ "fld.s %0, (32*8), fr63\n\t"
+ "fputscr fr63\n\t"
+
+ "fld.p %0, (31*8), fp62\n\t"
+ : /* no output */
+ : "r" (fpregs) );
+}
+
+void fpinit(struct sh_fpu_hard_struct *fpregs)
+{
+ *fpregs = init_fpuregs.hard;
+}
+
+asmlinkage void
+do_fpu_error(unsigned long ex, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ regs->pc += 4;
+
+ tsk->thread.trap_no = 11;
+ tsk->thread.error_code = 0;
+ force_sig(SIGFPE, tsk);
+}
+
+
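+/*
+ * FPU-disable trap: lazy FPU context switching.  Re-enable the FPU for
+ * the current task (clear SR.FD), save the previous owner's FPU state
+ * if another task held it, then load either this task's saved state or
+ * the initial sNaN image on first use.
+ */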
+asmlinkage void
+do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
+{
+ void die(const char *str, struct pt_regs *regs, long err);
+
+	if (!user_mode(regs))
+ die("FPU used in kernel", regs, ex);
+
+ regs->sr &= ~SR_FD;
+
+ if (last_task_used_math == current)
+ return;
+
+ enable_fpu();
+ if (last_task_used_math != NULL)
+		/* Another process's FPU state; save it away */
+ save_fpu(last_task_used_math, regs);
+
+ last_task_used_math = current;
+ if (used_math()) {
+ fpload(&current->thread.fpu.hard);
+ } else {
+ /* First time FPU user. */
+ fpload(&init_fpuregs.hard);
+ set_used_math();
+ }
+ disable_fpu();
+}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
new file mode 100644
index 00000000000..15d167fd0ae
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -0,0 +1,76 @@
+/*
+ * arch/sh/kernel/cpu/sh5/probe.c
+ *
+ * CPU Subtype Probing for SH-5.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+int __init detect_cpu_and_cache_system(void)
+{
+ unsigned long long cir;
+
+ /* Do peeks in real mode to avoid having to set up a mapping for the
+ WPC registers. On SH5-101 cut2, such a mapping would be exposed to
+ an address translation erratum which would make it hard to set up
+ correctly. */
+ cir = peek_real_address_q(0x0d000008);
+ if ((cir & 0xffff) == 0x5103) {
+ boot_cpu_data.type = CPU_SH5_103;
+ } else if (((cir >> 32) & 0xffff) == 0x51e2) {
+ /* CPU.VCR aliased at CIR address on SH5-101 */
+ boot_cpu_data.type = CPU_SH5_101;
+ } else {
+ boot_cpu_data.type = CPU_SH_NONE;
+ }
+
+ /*
+ * First, setup some sane values for the I-cache.
+ */
+ boot_cpu_data.icache.ways = 4;
+ boot_cpu_data.icache.sets = 256;
+ boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
+
+#if 0
+ /*
+ * FIXME: This can probably be cleaned up a bit as well.. for example,
+ * do we really need the way shift _and_ the way_step_shift ?? Judging
+ * by the existing code, I would guess no.. is there any valid reason
+ * why we need to be tracking this around?
+ */
+ boot_cpu_data.icache.way_shift = 13;
+ boot_cpu_data.icache.entry_shift = 5;
+ boot_cpu_data.icache.set_shift = 4;
+ boot_cpu_data.icache.way_step_shift = 16;
+ boot_cpu_data.icache.asid_shift = 2;
+
+ /*
+ * way offset = cache size / associativity, so just don't factor in
+ * associativity in the first place..
+ */
+ boot_cpu_data.icache.way_ofs = boot_cpu_data.icache.sets *
+ boot_cpu_data.icache.linesz;
+
+ boot_cpu_data.icache.asid_mask = 0x3fc;
+ boot_cpu_data.icache.idx_mask = 0x1fe0;
+ boot_cpu_data.icache.epn_mask = 0xffffe000;
+#endif
+
+ boot_cpu_data.icache.flags = 0;
+
+ /* A trivial starting point.. */
+ memcpy(&boot_cpu_data.dcache,
+ &boot_cpu_data.icache, sizeof(struct cache_info));
+
+ return 0;
+}
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
new file mode 100644
index 00000000000..45c351b0f1b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/switchto.S
@@ -0,0 +1,198 @@
+/*
+ * arch/sh/kernel/cpu/sh5/switchto.S
+ *
+ * sh64 context switch
+ *
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+*/
+
+ .section .text..SHmedia32,"ax"
+ .little
+
+ .balign 32
+
+ .type sh64_switch_to,@function
+ .global sh64_switch_to
+ .global __sh64_switch_to_end
+sh64_switch_to:
+
+/* Incoming args
+ r2 - prev
+ r3 - &prev->thread
+ r4 - next
+ r5 - &next->thread
+
+ Outgoing results
+ r2 - last (=prev) : this just stays in r2 throughout
+
+   We want to create a full (struct pt_regs) on the stack to allow backtracing
+   functions to work.  However, we only need to populate the callee-save
+   register slots in this structure; since we're a function, our ancestors
+   must have themselves preserved all caller-saved state on the stack.  This
+   saves some wasted effort since we won't need to look at the values.
+
+ In particular, all caller-save registers are immediately available for
+ scratch use.
+
+*/
+
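+/* For reference (a sketch, not part of the patch proper): the C-level
+   switch_to is expected to call in as roughly
+
+	last = sh64_switch_to(prev, &prev->thread, next, &next->thread);
+
+   which is how r2..r5 come to hold the four values listed above. */
+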
+#define FRAME_SIZE (76*8 + 8)
+
+ movi FRAME_SIZE, r0
+ sub.l r15, r0, r15
+ ! Do normal-style register save to support backtrace
+
+ st.l r15, 0, r18 ! save link reg
+ st.l r15, 4, r14 ! save fp
+ add.l r15, r63, r14 ! setup frame pointer
+
+ ! hopefully this looks normal to the backtrace now.
+
+ addi.l r15, 8, r1 ! base of pt_regs
+ addi.l r1, 24, r0 ! base of pt_regs.regs
+ addi.l r0, (63*8), r8 ! base of pt_regs.trregs
+
+	/* Note: to be fixed?
+ struct pt_regs is really designed for holding the state on entry
+ to an exception, i.e. pc,sr,regs etc. However, for the context
+ switch state, some of this is not required. But the unwinder takes
+ struct pt_regs * as an arg so we have to build this structure
+ to allow unwinding switched tasks in show_state() */
+
+ st.q r0, ( 9*8), r9
+ st.q r0, (10*8), r10
+ st.q r0, (11*8), r11
+ st.q r0, (12*8), r12
+ st.q r0, (13*8), r13
+ st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
+ ! the point where the process is left in suspended animation, i.e. current
+ ! fp here, not the saved one.
+ st.q r0, (16*8), r16
+
+ st.q r0, (24*8), r24
+ st.q r0, (25*8), r25
+ st.q r0, (26*8), r26
+ st.q r0, (27*8), r27
+ st.q r0, (28*8), r28
+ st.q r0, (29*8), r29
+ st.q r0, (30*8), r30
+ st.q r0, (31*8), r31
+ st.q r0, (32*8), r32
+ st.q r0, (33*8), r33
+ st.q r0, (34*8), r34
+ st.q r0, (35*8), r35
+
+ st.q r0, (44*8), r44
+ st.q r0, (45*8), r45
+ st.q r0, (46*8), r46
+ st.q r0, (47*8), r47
+ st.q r0, (48*8), r48
+ st.q r0, (49*8), r49
+ st.q r0, (50*8), r50
+ st.q r0, (51*8), r51
+ st.q r0, (52*8), r52
+ st.q r0, (53*8), r53
+ st.q r0, (54*8), r54
+ st.q r0, (55*8), r55
+ st.q r0, (56*8), r56
+ st.q r0, (57*8), r57
+ st.q r0, (58*8), r58
+ st.q r0, (59*8), r59
+
+ ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
+	! Use a local label to avoid creating a symbol that will confuse the
+ ! backtrace
+ pta .Lsave_pc, tr0
+
+ gettr tr5, r45
+ gettr tr6, r46
+ gettr tr7, r47
+ st.q r8, (5*8), r45
+ st.q r8, (6*8), r46
+ st.q r8, (7*8), r47
+
+ ! Now switch context
+ gettr tr0, r9
+ st.l r3, 0, r15 ! prev->thread.sp
+ st.l r3, 8, r1 ! prev->thread.kregs
+ st.l r3, 4, r9 ! prev->thread.pc
+ st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
+
+ ! Load PC for next task (init value or save_pc later)
+ ld.l r5, 4, r18 ! next->thread.pc
+ ! Switch stacks
+ ld.l r5, 0, r15 ! next->thread.sp
+ ptabs r18, tr0
+
+ ! Update current
+ ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
+ putcon r9, kcr0 ! current = next->thread_info
+
+ ! go to save_pc for a reschedule, or the initial thread.pc for a new process
+ blink tr0, r63
+
+ ! Restore (when we come back to a previously saved task)
+.Lsave_pc:
+ addi.l r15, 32, r0 ! r0 = next's regs
+ addi.l r0, (63*8), r8 ! r8 = next's tr_regs
+
+ ld.q r8, (5*8), r45
+ ld.q r8, (6*8), r46
+ ld.q r8, (7*8), r47
+ ptabs r45, tr5
+ ptabs r46, tr6
+ ptabs r47, tr7
+
+ ld.q r0, ( 9*8), r9
+ ld.q r0, (10*8), r10
+ ld.q r0, (11*8), r11
+ ld.q r0, (12*8), r12
+ ld.q r0, (13*8), r13
+ ld.q r0, (14*8), r14
+ ld.q r0, (16*8), r16
+
+ ld.q r0, (24*8), r24
+ ld.q r0, (25*8), r25
+ ld.q r0, (26*8), r26
+ ld.q r0, (27*8), r27
+ ld.q r0, (28*8), r28
+ ld.q r0, (29*8), r29
+ ld.q r0, (30*8), r30
+ ld.q r0, (31*8), r31
+ ld.q r0, (32*8), r32
+ ld.q r0, (33*8), r33
+ ld.q r0, (34*8), r34
+ ld.q r0, (35*8), r35
+
+ ld.q r0, (44*8), r44
+ ld.q r0, (45*8), r45
+ ld.q r0, (46*8), r46
+ ld.q r0, (47*8), r47
+ ld.q r0, (48*8), r48
+ ld.q r0, (49*8), r49
+ ld.q r0, (50*8), r50
+ ld.q r0, (51*8), r51
+ ld.q r0, (52*8), r52
+ ld.q r0, (53*8), r53
+ ld.q r0, (54*8), r54
+ ld.q r0, (55*8), r55
+ ld.q r0, (56*8), r56
+ ld.q r0, (57*8), r57
+ ld.q r0, (58*8), r58
+ ld.q r0, (59*8), r59
+
+ ! epilogue
+ ld.l r15, 0, r18
+ ld.l r15, 4, r14
+ ptabs r18, tr0
+ movi FRAME_SIZE, r0
+ add r15, r0, r15
+ blink tr0, r63
+__sh64_switch_to_end:
+.LFE1:
+ .size sh64_switch_to,.LFE1-sh64_switch_to
+
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
new file mode 100644
index 00000000000..119c20afd4e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -0,0 +1,326 @@
+/*
+ * arch/sh/kernel/cpu/sh5/unwind.c
+ *
+ * Copyright (C) 2004 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+static u8 regcache[63];
+
+/*
+ * Finding the previous stack frame isn't as straightforward as it is
+ * on some other platforms. In the sh64 case, we don't have "linked" stack
+ * frames, so we need to do a bit of work to determine the previous frame,
+ * and in turn, the previous r14/r18 pair.
+ *
+ * There are generally a few cases which determine where we can find out
+ * the r14/r18 values. In the general case, this can be determined by poking
+ * around the prologue of the symbol PC is in (note that we absolutely must
+ * have frame pointer support as well as the kernel symbol table mapped,
+ * otherwise we can't even get this far).
+ *
+ * In other cases, such as the interrupt/exception path, we can poke around
+ * the sp/fp.
+ *
+ * Notably, this entire approach is somewhat error prone, and in the event
+ * that the previous frame cannot be determined, that's all we can do.
+ * Either way, this still leaves us with a more correct backtrace than what
+ * we would be able to come up with by walking the stack (which is garbage
+ * for anything beyond the first frame).
+ * -- PFM.
+ */
+static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
+ unsigned long *pprev_fp, unsigned long *pprev_pc,
+ struct pt_regs *regs)
+{
+ const char *sym;
+ char namebuf[128];
+ unsigned long offset;
+ unsigned long prologue = 0;
+ unsigned long fp_displacement = 0;
+ unsigned long fp_prev = 0;
+ unsigned long offset_r14 = 0, offset_r18 = 0;
+ int i, found_prologue_end = 0;
+
+ sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
+ if (!sym)
+ return -EINVAL;
+
+ prologue = pc - offset;
+ if (!prologue)
+ return -EINVAL;
+
+ /* Validate fp, to avoid risk of dereferencing a bad pointer later.
+	   Assume 128MB since that's the amount of RAM on a Cayman.  Modify
+ when there is an SH-5 board with more. */
+ if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
+ (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
+ ((fp & 7) != 0)) {
+ return -EINVAL;
+ }
+
+ /*
+	 * Depth to walk; the limit is completely arbitrary.
+ */
+ for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
+ unsigned long op;
+ u8 major, minor;
+ u8 src, dest, disp;
+
+ op = *(unsigned long *)prologue;
+
+ major = (op >> 26) & 0x3f;
+ src = (op >> 20) & 0x3f;
+ minor = (op >> 16) & 0xf;
+ disp = (op >> 10) & 0x3f;
+ dest = (op >> 4) & 0x3f;
+
+ /*
+ * Stack frame creation happens in a number of ways.. in the
+ * general case when the stack frame is less than 511 bytes,
+ * it's generally created by an addi or addi.l:
+ *
+ * addi/addi.l r15, -FRAME_SIZE, r15
+ *
+ * in the event that the frame size is bigger than this, it's
+ * typically created using a movi/sub pair as follows:
+ *
+ * movi FRAME_SIZE, rX
+ * sub r15, rX, r15
+ */
+
+ switch (major) {
+ case (0x00 >> 2):
+ switch (minor) {
+ case 0x8: /* add.l */
+ case 0x9: /* add */
+ /* Look for r15, r63, r14 */
+ if (src == 15 && disp == 63 && dest == 14)
+ found_prologue_end = 1;
+
+ break;
+ case 0xa: /* sub.l */
+ case 0xb: /* sub */
+ if (src != 15 || dest != 15)
+ continue;
+
+ fp_displacement -= regcache[disp];
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+ break;
+ case (0xa8 >> 2): /* st.l */
+ if (src != 15)
+ continue;
+
+ switch (dest) {
+ case 14:
+ if (offset_r14 || fp_displacement == 0)
+ continue;
+
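+				/* Bits [19:10] of the opcode hold a signed
+				 * 10-bit displacement: shift it up to the top
+				 * of a 64-bit word and arithmetic-shift back
+				 * down to sign-extend it, then scale it to a
+				 * byte offset and bias by the running frame
+				 * displacement. */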
+ offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r14 *= sizeof(unsigned long);
+ offset_r14 += fp_displacement;
+ break;
+ case 18:
+ if (offset_r18 || fp_displacement == 0)
+ continue;
+
+ offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r18 *= sizeof(unsigned long);
+ offset_r18 += fp_displacement;
+ break;
+ }
+
+ break;
+ case (0xcc >> 2): /* movi */
+ if (dest >= 63) {
+ printk(KERN_NOTICE "%s: Invalid dest reg %d "
+ "specified in movi handler. Failed "
+ "opcode was 0x%lx: ", __FUNCTION__,
+ dest, op);
+
+ continue;
+ }
+
+ /* Sign extend */
+ regcache[dest] =
+ ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
+ break;
+ case (0xd0 >> 2): /* addi */
+ case (0xd4 >> 2): /* addi.l */
+ /* Look for r15, -FRAME_SIZE, r15 */
+ if (src != 15 || dest != 15)
+ continue;
+
+ /* Sign extended frame size.. */
+ fp_displacement +=
+ (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+
+ if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
+ break;
+ }
+
+ if (offset_r14 == 0 || fp_prev == 0) {
+ if (!offset_r14)
+ pr_debug("Unable to find r14 offset\n");
+ if (!fp_prev)
+ pr_debug("Unable to find previous fp\n");
+
+ return -EINVAL;
+ }
+
+	/* For the innermost leaf function, there might not be an offset_r18 */
+ if (!*pprev_pc && (offset_r18 == 0))
+ return -EINVAL;
+
+ *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
+
+ if (offset_r18)
+ *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
+
+ *pprev_pc &= ~1;
+
+ return 0;
+}
+
+/* Don't put this on the stack since we'll want to call sh64_unwind
+ * when we're close to overflowing the stack anyway. */
+static struct pt_regs here_regs;
+
+extern const char syscall_ret;
+extern const char ret_from_syscall;
+extern const char ret_from_exception;
+extern const char ret_from_irq;
+
+static void sh64_unwind_inner(struct pt_regs *regs);
+
+static void unwind_nested (unsigned long pc, unsigned long fp)
+{
+ if ((fp >= __MEMORY_START) &&
+ ((fp & 7) == 0)) {
+ sh64_unwind_inner((struct pt_regs *) fp);
+ }
+}
+
+static void sh64_unwind_inner(struct pt_regs *regs)
+{
+ unsigned long pc, fp;
+ int ofs = 0;
+ int first_pass;
+
+ pc = regs->pc & ~1;
+ fp = regs->regs[14];
+
+ first_pass = 1;
+ for (;;) {
+ int cond;
+ unsigned long next_fp, next_pc;
+
+ if (pc == ((unsigned long) &syscall_ret & ~1)) {
+ printk("SYSCALL\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
+ printk("SYSCALL (PREEMPTED)\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ /* In this case, the PC is discovered by lookup_prev_stack_frame but
+ it has 4 taken off it to look like the 'caller' */
+ if (pc == ((unsigned long) &ret_from_exception & ~1)) {
+ printk("EXCEPTION\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_irq & ~1)) {
+ printk("IRQ\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
+ ((pc & 3) == 0) && ((fp & 7) == 0));
+
+ pc -= ofs;
+
+ printk("[<%08lx>] ", pc);
+ print_symbol("%s\n", pc);
+
+ if (first_pass) {
+ /* If the innermost frame is a leaf function, it's
+ * possible that r18 is never saved out to the stack.
+ */
+ next_pc = regs->regs[18];
+ } else {
+ next_pc = 0;
+ }
+
+ if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
+ ofs = sizeof(unsigned long);
+ pc = next_pc & ~1;
+ fp = next_fp;
+ } else {
+ printk("Unable to lookup previous stack frame\n");
+ break;
+ }
+ first_pass = 0;
+ }
+
+ printk("\n");
+
+}
+
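+/*
+ * Entry point: pass a saved pt_regs to backtrace from that context, or
+ * NULL to capture and unwind the caller's own context (a debug path can
+ * simply call sh64_unwind(NULL)).
+ */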
+void sh64_unwind(struct pt_regs *regs)
+{
+ if (!regs) {
+ /*
+ * Fetch current regs if we have no other saved state to back
+ * trace from.
+ */
+ regs = &here_regs;
+
+ __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
+ __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
+ __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
+
+ __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
+ __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
+ __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
+ __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
+ __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
+ __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
+ __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
+ __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
+
+ __asm__ __volatile__ (
+ "pta 0f, tr0\n\t"
+ "blink tr0, %0\n\t"
+ "0: nop"
+ : "=r" (regs->pc)
+ );
+ }
+
+ printk("\nCall Trace:\n");
+ sh64_unwind_inner(regs);
+}
+