Diffstat (limited to 'arch/sh/kernel/cpu/sh5')
-rw-r--r--	arch/sh/kernel/cpu/sh5/Makefile		    7
-rw-r--r--	arch/sh/kernel/cpu/sh5/entry.S		 2101
-rw-r--r--	arch/sh/kernel/cpu/sh5/fpu.c		  166
-rw-r--r--	arch/sh/kernel/cpu/sh5/probe.c		   76
-rw-r--r--	arch/sh/kernel/cpu/sh5/switchto.S	  198
-rw-r--r--	arch/sh/kernel/cpu/sh5/unwind.c		  326
6 files changed, 2874 insertions(+), 0 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
new file mode 100644
index 00000000000..8646363e9de
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux/SuperH SH-5 backends.
+#
+obj-y := entry.o probe.o switchto.o
+
+obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_KALLSYMS) += unwind.o
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
new file mode 100644
index 00000000000..ba8750176d9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -0,0 +1,2101 @@
+/*
+ * arch/sh/kernel/cpu/sh5/entry.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/sys.h>
+#include <asm/cpu/registers.h>
+#include <asm/processor.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * SR fields.
+ */
+#define SR_ASID_MASK 0x00ff0000
+#define SR_FD_MASK 0x00008000
+#define SR_SS 0x08000000
+#define SR_BL 0x10000000
+#define SR_MD 0x40000000
+
+/*
+ * Event code.
+ */
+#define EVENT_INTERRUPT 0
+#define EVENT_FAULT_TLB 1
+#define EVENT_FAULT_NOT_TLB 2
+#define EVENT_DEBUG 3
+
+/* EXPEVT values */
+#define RESET_CAUSE 0x20
+#define DEBUGSS_CAUSE 0x980
+
+/*
+ * Frame layout. Quad index.
+ */
+#define FRAME_T(x) FRAME_TBASE+(x*8)
+#define FRAME_R(x) FRAME_RBASE+(x*8)
+#define FRAME_S(x) FRAME_SBASE+(x*8)
+#define FSPC 0
+#define FSSR 1
+#define FSYSCALL_ID 2
+
+/* Arrange the save frame to be a multiple of 32 bytes long */
+#define FRAME_SBASE 0
+#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
+#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
+#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
+#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
+
+#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
+#define FP_FRAME_BASE 0
+
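As a quick sanity check of the "multiple of 32 bytes" claim above: 3 + 63 + 8 + 2 = 76 quadwords, so FRAME_SIZE = 608 = 19 * 32 bytes. The same arithmetic restated in C (a sketch mirroring the macros above, not part of the patch):

	/* Sketch: frame arithmetic from the FRAME_* macros, checked at compile time. */
	enum {
		SBASE = 0,              /* SYSCALL_ID - SSR - SPC */
		RBASE = SBASE + 3 * 8,  /* r0 - r62               */
		TBASE = RBASE + 63 * 8, /* tr0 - tr7              */
		PBASE = TBASE + 8 * 8,  /* pad0 - pad1            */
		SIZE  = PBASE + 2 * 8,  /* = 608                  */
	};
	_Static_assert(SIZE % 32 == 0, "save frame size must be a multiple of 32 bytes");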
+#define SAVED_R2 0*8
+#define SAVED_R3 1*8
+#define SAVED_R4 2*8
+#define SAVED_R5 3*8
+#define SAVED_R18 4*8
+#define SAVED_R6 5*8
+#define SAVED_TR0 6*8
+
+/* These are the registers saved in the TLB path that aren't saved in the first
+ level of the normal one. */
+#define TLB_SAVED_R25 7*8
+#define TLB_SAVED_TR1 8*8
+#define TLB_SAVED_TR2 9*8
+#define TLB_SAVED_TR3 10*8
+#define TLB_SAVED_TR4 11*8
+/* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and
+   -ffixed-r1, causing breakage otherwise. */
+#define TLB_SAVED_R0 12*8
+#define TLB_SAVED_R1 13*8
+
+#define CLI() \
+ getcon SR, r6; \
+ ori r6, 0xf0, r6; \
+ putcon r6, SR;
+
+#define STI() \
+ getcon SR, r6; \
+ andi r6, ~0xf0, r6; \
+ putcon r6, SR;
+
+#ifdef CONFIG_PREEMPT
+# define preempt_stop() CLI()
+#else
+# define preempt_stop()
+# define resume_kernel restore_all
+#endif
+
+ .section .data, "aw"
+
+#define FAST_TLBMISS_STACK_CACHELINES 4
+#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
+
+ .balign 32
+ /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
+ * register saves etc. */
+ .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
+/* This is 32 byte aligned by construction */
+/* Register back-up area for all exceptions */
+reg_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+
+/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
+ * reentrancy. Note this area may be accessed via physical address.
+ * Align so this fits a whole single cache line, for ease of purging.
+ */
+ .balign 32,0,32
+resvec_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .balign 32,0,32
+
+/* Jump table of 3rd level handlers */
+trap_jtable:
+ .long do_exception_error /* 0x000 */
+ .long do_exception_error /* 0x020 */
+ .long tlb_miss_load /* 0x040 */
+ .long tlb_miss_store /* 0x060 */
+ ! ARTIFICIAL pseudo-EXPEVT setting
+ .long do_debug_interrupt /* 0x080 */
+ .long tlb_miss_load /* 0x0A0 */
+ .long tlb_miss_store /* 0x0C0 */
+ .long do_address_error_load /* 0x0E0 */
+ .long do_address_error_store /* 0x100 */
+#ifdef CONFIG_SH_FPU
+ .long do_fpu_error /* 0x120 */
+#else
+ .long do_exception_error /* 0x120 */
+#endif
+ .long do_exception_error /* 0x140 */
+ .long system_call /* 0x160 */
+ .long do_reserved_inst /* 0x180 */
+ .long do_illegal_slot_inst /* 0x1A0 */
+ .long do_exception_error /* 0x1C0 - NMI */
+ .long do_exception_error /* 0x1E0 */
+ .rept 15
+ .long do_IRQ /* 0x200 - 0x3C0 */
+ .endr
+ .long do_exception_error /* 0x3E0 */
+ .rept 32
+ .long do_IRQ /* 0x400 - 0x7E0 */
+ .endr
+ .long fpu_error_or_IRQA /* 0x800 */
+ .long fpu_error_or_IRQB /* 0x820 */
+ .long do_IRQ /* 0x840 */
+ .long do_IRQ /* 0x860 */
+ .rept 6
+ .long do_exception_error /* 0x880 - 0x920 */
+ .endr
+ .long do_software_break_point /* 0x940 */
+ .long do_exception_error /* 0x960 */
+ .long do_single_step /* 0x980 */
+
+ .rept 3
+ .long do_exception_error /* 0x9A0 - 0x9E0 */
+ .endr
+ .long do_IRQ /* 0xA00 */
+ .long do_IRQ /* 0xA20 */
+ .long itlb_miss_or_IRQ /* 0xA40 */
+ .long do_IRQ /* 0xA60 */
+ .long do_IRQ /* 0xA80 */
+ .long itlb_miss_or_IRQ /* 0xAA0 */
+ .long do_exception_error /* 0xAC0 */
+ .long do_address_error_exec /* 0xAE0 */
+ .rept 8
+ .long do_exception_error /* 0xB00 - 0xBE0 */
+ .endr
+ .rept 18
+ .long do_IRQ /* 0xC00 - 0xE20 */
+ .endr
+
+ .section .text64, "ax"
+
+/*
+ * --- Exception/Interrupt/Event Handling Section
+ */
+
+/*
+ * VBR and RESVEC blocks.
+ *
+ * First level handler for VBR-based exceptions.
+ *
+ * To avoid waste of space, align to the maximum text block size.
+ * This is assumed to be at most 128 bytes or 32 instructions.
+ * DO NOT EXCEED 32 instructions on the first level handlers!
+ *
+ * Also note that RESVEC is contained within the VBR block
+ * where the room left (1KB - TEXT_SIZE) allows placing
+ * the RESVEC block (at most 512B + TEXT_SIZE).
+ *
+ * So first (and only) level handler for RESVEC-based exceptions.
+ *
+ * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
+ * and interrupt) register space is very tight until we save onto
+ * the stack frame, which is done in handle_exception().
+ *
+ */
+
+#define TEXT_SIZE 128
+#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
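Reading the alignment directives below against their comments: the power-on class space occupies VBR+0x000-0x0FF, not_a_tlb_miss lands at VBR+0x100, tlb_miss at the 1KB boundary (VBR+0x400) that the .balign comments are careful to preserve, and the interrupt handler at VBR+0x600. BLOCK_SIZE = 13 * TEXT_SIZE = 1664 bytes is the budget trap_init() later checks the whole LVBR block against.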
+
+ .balign TEXT_SIZE
+LVBR_block:
+ .space 256, 0 /* Power-on class handler, */
+ /* not required here */
+not_a_tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+ ! VBR+0x200
+ nop
+ .balign 256
+ ! VBR+0x300
+ nop
+ .balign 256
+ /*
+ * Instead of the natural .balign 1024 place RESVEC here
+ * respecting the final 1KB alignment.
+ */
+ .balign TEXT_SIZE
+ /*
+ * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
+ * block making sure the final alignment is correct.
+ */
+tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, KCR1
+ movi reg_save_area, SP
+ /* SP is guaranteed 32-byte aligned. */
+ st.q SP, TLB_SAVED_R0 , r0
+ st.q SP, TLB_SAVED_R1 , r1
+ st.q SP, SAVED_R2 , r2
+ st.q SP, SAVED_R3 , r3
+ st.q SP, SAVED_R4 , r4
+ st.q SP, SAVED_R5 , r5
+ st.q SP, SAVED_R6 , r6
+ st.q SP, SAVED_R18, r18
+
+ /* Save R25 for safety; as/ld may want to use it to achieve the call to
+ * the code in mm/tlbmiss.c */
+ st.q SP, TLB_SAVED_R25, r25
+ gettr tr0, r2
+ gettr tr1, r3
+ gettr tr2, r4
+ gettr tr3, r5
+ gettr tr4, r18
+ st.q SP, SAVED_TR0 , r2
+ st.q SP, TLB_SAVED_TR1 , r3
+ st.q SP, TLB_SAVED_TR2 , r4
+ st.q SP, TLB_SAVED_TR3 , r5
+ st.q SP, TLB_SAVED_TR4 , r18
+
+ pt do_fast_page_fault, tr0
+ getcon SSR, r2
+ getcon EXPEVT, r3
+ getcon TEA, r4
+ shlri r2, 30, r2
+ andi r2, 1, r2 /* r2 = SSR.MD */
+ blink tr0, LINK
+
+ pt fixup_to_invoke_general_handler, tr1
+
+ /* If the fast path handler fixed the fault, just drop through quickly
+ to the restore code right away to return to the excepting context.
+ */
+ beqi/u r2, 0, tr1
+
+fast_tlb_miss_restore:
+ ld.q SP, SAVED_TR0, r2
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+
+ ptabs r2, tr0
+ ptabs r3, tr1
+ ptabs r4, tr2
+ ptabs r5, tr3
+ ptabs r18, tr4
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+ ld.q SP, SAVED_R2, r2
+ ld.q SP, SAVED_R3, r3
+ ld.q SP, SAVED_R4, r4
+ ld.q SP, SAVED_R5, r5
+ ld.q SP, SAVED_R6, r6
+ ld.q SP, SAVED_R18, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ getcon KCR1, SP
+ rte
+ nop /* for safety, in case the code is run on sh5-101 cut1.x */
+
+fixup_to_invoke_general_handler:
+
+ /* OK, new method. Restore stuff that's not expected to get saved into
+ the 'first-level' reg save area, then just fall through to setting
+ up the registers and calling the second-level handler. */
+
+ /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
+ r25,tr1-4 and save r6 to get into the right state. */
+
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+
+ ptabs/u r3, tr1
+ ptabs/u r4, tr2
+ ptabs/u r5, tr3
+ ptabs/u r18, tr4
+
+ /* Set args for Non-debug, TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
+ DOES END UP AT VBR+0x600 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .balign 256
+ /* VBR + 0x600 */
+
+interrupt:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for interrupt class handler */
+ getcon INTEVT, r2
+ movi ret_from_irq, r3
+ ori r3, 1, r3
+ movi EVENT_INTERRUPT, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+ .balign TEXT_SIZE /* let's waste the bare minimum */
+
+LVBR_block_end: /* Marker. Used for total checking */
+
+ .balign 256
+LRESVEC_block:
+ /* Panic handler. Called with MMU off. Possible causes/actions:
+ * - Reset: Jump to program start.
+ * - Single Step: Turn off Single Step & return.
+ * - Others: Call panic handler, passing PC as arg.
+ * (this may need to be extended...)
+ */
+reset_or_panic:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, DCR
+ /* First save r0-1 and tr0, as we need to use these */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ gettr tr0, r0
+ st.q SP, 32, r0
+
+ /* Check cause */
+ getcon EXPEVT, r0
+ movi RESET_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if reset */
+ movi _stext-CONFIG_PAGE_OFFSET, r0
+ ori r0, 1, r0
+ ptabs r0, tr0
+ beqi r1, 0, tr0 /* Jump to start address if reset */
+
+ getcon EXPEVT, r0
+ movi DEBUGSS_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if single step */
+ pta single_step_panic, tr0
+ beqi r1, 0, tr0 /* jump if single step */
+
+ /* Now jump to where we save the registers. */
+ movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ blink tr0, r63
+
+single_step_panic:
+ /* We are in a handler with Single Step set. We need to resume the
+ * handler, by turning on MMU & turning off Single Step. */
+ getcon SSR, r0
+ movi SR_MMU, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Restore EXPEVT, as the rte won't do this */
+ getcon PEXPEVT, r0
+ putcon r0, EXPEVT
+ /* Restore regs */
+ ld.q SP, 32, r0
+ ptabs r0, tr0
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+ getcon DCR, SP
+ synco
+ rte
+
+
+ .balign 256
+debug_exception:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /*
+ * Single step/software_break_point first level handler.
+ * Called with MMU off, so the first thing we do is enable it
+ * by doing an rte with appropriate SSR.
+ */
+ putcon SP, DCR
+ /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+
+ /* With the MMU off, we are bypassing the cache, so purge any
+ * data that will be made stale by the following stores.
+ */
+ ocbp SP, 0
+ synco
+
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ getcon SPC, r0
+ st.q SP, 16, r0
+ getcon SSR, r0
+ st.q SP, 24, r0
+
+ /* Enable MMU, block exceptions, set priv mode, disable single step */
+ movi SR_MMU | SR_BL | SR_MD, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Force control to debug_exception_2 when rte is executed */
+	movi	debug_exception_2, r0
+ ori r0, 1, r0 /* force SHmedia, just in case */
+ putcon r0, SPC
+ getcon DCR, SP
+ synco
+ rte
+debug_exception_2:
+ /* Restore saved regs */
+ putcon SP, KCR1
+ movi resvec_save_area, SP
+ ld.q SP, 24, r0
+ putcon r0, SSR
+ ld.q SP, 16, r0
+ putcon r0, SPC
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for debug class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_DEBUG, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+debug_interrupt:
+ /* !!! WE COME HERE IN REAL MODE !!! */
+ /* Hook-up debug interrupt to allow various debugging options to be
+ * hooked into its handler. */
+ /* Save original stack pointer into KCR1 */
+ synco
+ putcon SP, KCR1
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ ocbp SP, 0
+ ocbp SP, 32
+ synco
+
+	/* Save other original registers into resvec_save_area via physical addresses */
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* move (spc,ssr)->(pspc,pssr). The rte will shift
+ them back again, so that they look like the originals
+ as far as the real handler code is concerned. */
+ getcon spc, r6
+ putcon r6, pspc
+ getcon ssr, r6
+ putcon r6, pssr
+
+ ! construct useful SR for handle_exception
+ movi 3, r6
+ shlli r6, 30, r6
+ getcon sr, r18
+ or r18, r6, r6
+ putcon r6, ssr
+
+ ! SSR is now the current SR with the MD and MMU bits set
+ ! i.e. the rte will switch back to priv mode and put
+ ! the mmu back on
+
+ ! construct spc
+ movi handle_exception, r18
+ ori r18, 1, r18 ! for safety (do we need this?)
+ putcon r18, spc
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+
+ ! EXPEVT==0x80 is unused, so 'steal' this value to put the
+ ! debug interrupt handler in the vectoring table
+ movi 0x80, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+
+ or SP, ZERO, r5
+ movi CONFIG_PAGE_OFFSET, r6
+ add r6, r5, r5
+ getcon KCR1, SP
+
+ synco ! for safety
+ rte ! -> handle_exception, switch back to priv mode again
+
+LRESVEC_block_end: /* Marker. Unused. */
+
+ .balign TEXT_SIZE
+
+/*
+ * Second level handler for VBR-based exceptions. Pre-handler.
+ * In common to all stack-frame sensitive handlers.
+ *
+ * Inputs:
+ * (KCR0) Current [current task union]
+ * (KCR1) Original SP
+ * (r2) INTEVT/EXPEVT
+ * (r3) appropriate return address
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
+ * (r5) Pointer to reg_save_area
+ * (SP) Original SP
+ *
+ * Available registers:
+ * (r6)
+ * (r18)
+ * (tr0)
+ *
+ */
+handle_exception:
+ /* Common 2nd level handler. */
+
+ /* First thing we need an appropriate stack pointer */
+ getcon SSR, r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta stack_ok, tr0
+ bne r6, ZERO, tr0 /* Original stack pointer is fine */
+
+ /* Set stack pointer for user fault */
+ getcon KCR0, SP
+ movi THREAD_SIZE, r6 /* Point to the end */
+ add SP, r6, SP
+
+stack_ok:
+
+/* DEBUG : check for underflow/overflow of the kernel stack */
+ pta no_underflow, tr0
+ getcon KCR0, r6
+ movi 1024, r18
+ add r6, r18, r6
+	bge	SP, r6, tr0	! ok if at least 1k above stack bottom, else danger zone
+
+/* Just panic to cause a crash. */
+bad_sp:
+ ld.b r63, 0, r6
+ nop
+
+no_underflow:
+ pta bad_sp, tr0
+ getcon kcr0, r6
+ movi THREAD_SIZE, r18
+ add r18, r6, r6
+ bgt SP, r6, tr0 ! sp above the stack
+
+ /* Make some room for the BASIC frame. */
+ movi -(FRAME_SIZE), r6
+ add SP, r6, SP
+
+/* Could do this with no stalling if we had another spare register, but the
+ code below will be OK. */
+ ld.q r5, SAVED_R2, r6
+ ld.q r5, SAVED_R3, r18
+ st.q SP, FRAME_R(2), r6
+ ld.q r5, SAVED_R4, r6
+ st.q SP, FRAME_R(3), r18
+ ld.q r5, SAVED_R5, r18
+ st.q SP, FRAME_R(4), r6
+ ld.q r5, SAVED_R6, r6
+ st.q SP, FRAME_R(5), r18
+ ld.q r5, SAVED_R18, r18
+ st.q SP, FRAME_R(6), r6
+ ld.q r5, SAVED_TR0, r6
+ st.q SP, FRAME_R(18), r18
+ st.q SP, FRAME_T(0), r6
+
+ /* Keep old SP around */
+ getcon KCR1, r6
+
+ /* Save the rest of the general purpose registers */
+ st.q SP, FRAME_R(0), r0
+ st.q SP, FRAME_R(1), r1
+ st.q SP, FRAME_R(7), r7
+ st.q SP, FRAME_R(8), r8
+ st.q SP, FRAME_R(9), r9
+ st.q SP, FRAME_R(10), r10
+ st.q SP, FRAME_R(11), r11
+ st.q SP, FRAME_R(12), r12
+ st.q SP, FRAME_R(13), r13
+ st.q SP, FRAME_R(14), r14
+
+ /* SP is somewhere else */
+ st.q SP, FRAME_R(15), r6
+
+ st.q SP, FRAME_R(16), r16
+ st.q SP, FRAME_R(17), r17
+ /* r18 is saved earlier. */
+ st.q SP, FRAME_R(19), r19
+ st.q SP, FRAME_R(20), r20
+ st.q SP, FRAME_R(21), r21
+ st.q SP, FRAME_R(22), r22
+ st.q SP, FRAME_R(23), r23
+ st.q SP, FRAME_R(24), r24
+ st.q SP, FRAME_R(25), r25
+ st.q SP, FRAME_R(26), r26
+ st.q SP, FRAME_R(27), r27
+ st.q SP, FRAME_R(28), r28
+ st.q SP, FRAME_R(29), r29
+ st.q SP, FRAME_R(30), r30
+ st.q SP, FRAME_R(31), r31
+ st.q SP, FRAME_R(32), r32
+ st.q SP, FRAME_R(33), r33
+ st.q SP, FRAME_R(34), r34
+ st.q SP, FRAME_R(35), r35
+ st.q SP, FRAME_R(36), r36
+ st.q SP, FRAME_R(37), r37
+ st.q SP, FRAME_R(38), r38
+ st.q SP, FRAME_R(39), r39
+ st.q SP, FRAME_R(40), r40
+ st.q SP, FRAME_R(41), r41
+ st.q SP, FRAME_R(42), r42
+ st.q SP, FRAME_R(43), r43
+ st.q SP, FRAME_R(44), r44
+ st.q SP, FRAME_R(45), r45
+ st.q SP, FRAME_R(46), r46
+ st.q SP, FRAME_R(47), r47
+ st.q SP, FRAME_R(48), r48
+ st.q SP, FRAME_R(49), r49
+ st.q SP, FRAME_R(50), r50
+ st.q SP, FRAME_R(51), r51
+ st.q SP, FRAME_R(52), r52
+ st.q SP, FRAME_R(53), r53
+ st.q SP, FRAME_R(54), r54
+ st.q SP, FRAME_R(55), r55
+ st.q SP, FRAME_R(56), r56
+ st.q SP, FRAME_R(57), r57
+ st.q SP, FRAME_R(58), r58
+ st.q SP, FRAME_R(59), r59
+ st.q SP, FRAME_R(60), r60
+ st.q SP, FRAME_R(61), r61
+ st.q SP, FRAME_R(62), r62
+
+ /*
+ * Save the S* registers.
+ */
+ getcon SSR, r61
+ st.q SP, FRAME_S(FSSR), r61
+ getcon SPC, r62
+ st.q SP, FRAME_S(FSPC), r62
+ movi -1, r62 /* Reset syscall_nr */
+ st.q SP, FRAME_S(FSYSCALL_ID), r62
+
+ /* Save the rest of the target registers */
+ gettr tr1, r6
+ st.q SP, FRAME_T(1), r6
+ gettr tr2, r6
+ st.q SP, FRAME_T(2), r6
+ gettr tr3, r6
+ st.q SP, FRAME_T(3), r6
+ gettr tr4, r6
+ st.q SP, FRAME_T(4), r6
+ gettr tr5, r6
+ st.q SP, FRAME_T(5), r6
+ gettr tr6, r6
+ st.q SP, FRAME_T(6), r6
+ gettr tr7, r6
+ st.q SP, FRAME_T(7), r6
+
+ ! setup FP so that unwinder can wind back through nested kernel mode
+ ! exceptions
+ add SP, ZERO, r14
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* We've pushed all the registers now, so only r2-r4 hold anything
+ * useful. Move them into callee save registers */
+ or r2, ZERO, r28
+ or r3, ZERO, r29
+ or r4, ZERO, r30
+
+ /* Preserve r2 as the event code */
+ movi evt_debug, r3
+ ori r3, 1, r3
+ ptabs r3, tr0
+
+ or SP, ZERO, r6
+ getcon TRA, r5
+ blink tr0, LINK
+
+ or r28, ZERO, r2
+ or r29, ZERO, r3
+ or r30, ZERO, r4
+#endif
+
+ /* For syscall and debug race condition, get TRA now */
+ getcon TRA, r5
+
+ /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
+ * Also set FD, to catch FPU usage in the kernel.
+ *
+ * benedict.gaster@superh.com 29/07/2002
+ *
+ * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
+ * same time change BL from 1->0, as any pending interrupt of a level
+ * higher than the previous value of IMASK will leak through and be
+ * taken unexpectedly.
+ *
+ * To avoid this we raise the IMASK and then issue another PUTCON to
+ * enable interrupts.
+ */
+ getcon SR, r6
+ movi SR_IMASK | SR_FD, r7
+ or r6, r7, r6
+ putcon r6, SR
+ movi SR_UNBLOCK_EXC, r7
+ and r6, r7, r6
+ putcon r6, SR
+
+
+ /* Now call the appropriate 3rd level handler */
+ or r3, ZERO, LINK
+ movi trap_jtable, r3
+ shlri r2, 3, r2
+ ldx.l r2, r3, r3
+ shlri r2, 2, r2
+ ptabs r3, tr0
+ or SP, ZERO, r3
+ blink tr0, ZERO
+
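Worked through, the two shifts around the jump-table load are doing unit conversion: EXPEVT/INTEVT codes step in units of 0x20 while trap_jtable entries are 4 bytes, so "shlri r2, 3" turns the code into a byte offset into the table (0x20 / 8 = 4), and the second "shlri r2, 2" leaves r2 = code / 0x20, the plain entry number the third-level handlers receive. For example, RTLBMISS (EXPEVT = 0x40): 0x40 >> 3 = 8 selects the third 4-byte entry (tlb_miss_load), and 0x40 >> 5 = 2 matches the "RTLBMISS = 2" in the handler interface comment further down.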
+/*
+ * Second level handler for VBR-based exceptions. Post-handlers.
+ *
+ * Post-handlers for interrupts (ret_from_irq), exceptions
+ * (ret_from_exception) and common reentrance doors (restore_all
+ * to get back to the original context, ret_from_syscall loop to
+ * check kernel exiting).
+ *
+ * ret_with_reschedule and work_notifysig are inner labels of
+ * the ret_from_syscall loop.
+ *
+ * In common to all stack-frame sensitive handlers.
+ *
+ * Inputs:
+ * (SP) struct pt_regs *, original register's frame pointer (basic)
+ *
+ */
+ .global ret_from_irq
+ret_from_irq:
+#ifdef CONFIG_POOR_MANS_STRACE
+ pta evt_debug_ret_from_irq, tr0
+ ori SP, 0, r2
+ blink tr0, LINK
+#endif
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+ STI()
+ pta ret_with_reschedule, tr0
+ blink tr0, ZERO /* Do not check softirqs */
+
+ .global ret_from_exception
+ret_from_exception:
+ preempt_stop()
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ pta evt_debug_ret_from_exc, tr0
+ ori SP, 0, r2
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+
+ /* Check softirqs */
+
+#ifdef CONFIG_PREEMPT
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+resume_kernel:
+ pta restore_all, tr0
+
+ getcon KCR0, r6
+ ld.l r6, TI_PRE_COUNT, r7
+ beq/u r7, ZERO, tr0
+
+need_resched:
+ ld.l r6, TI_FLAGS, r7
+ movi (1 << TIF_NEED_RESCHED), r8
+ and r8, r7, r8
+ bne r8, ZERO, tr0
+
+ getcon SR, r7
+ andi r7, 0xf0, r7
+ bne r7, ZERO, tr0
+
+ movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
+ shori (PREEMPT_ACTIVE & 65535), r8
+ st.l r6, TI_PRE_COUNT, r8
+
+ STI()
+ movi schedule, r7
+ ori r7, 1, r7
+ ptabs r7, tr1
+ blink tr1, LINK
+
+ st.l r6, TI_PRE_COUNT, ZERO
+ CLI()
+
+ pta need_resched, tr1
+ blink tr1, ZERO
+#endif
+
+ .global ret_from_syscall
+ret_from_syscall:
+
+ret_with_reschedule:
+ getcon KCR0, r6 ! r6 contains current_thread_info
+ ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
+
+ movi _TIF_NEED_RESCHED, r8
+ and r8, r7, r8
+ pta work_resched, tr0
+ bne r8, ZERO, tr0
+
+ pta restore_all, tr1
+
+ movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+ and r8, r7, r8
+ pta work_notifysig, tr0
+ bne r8, ZERO, tr0
+
+ blink tr1, ZERO
+
+work_resched:
+ pta ret_from_syscall, tr0
+ gettr tr0, LINK
+ movi schedule, r6
+ ptabs r6, tr0
+ blink tr0, ZERO /* Call schedule(), return on top */
+
+work_notifysig:
+ gettr tr1, LINK
+
+ movi do_signal, r6
+ ptabs r6, tr0
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3
+ blink tr0, LINK /* Call do_signal(regs, 0), return here */
+
+restore_all:
+ /* Do prefetches */
+
+ ld.q SP, FRAME_T(0), r6
+ ld.q SP, FRAME_T(1), r7
+ ld.q SP, FRAME_T(2), r8
+ ld.q SP, FRAME_T(3), r9
+ ptabs r6, tr0
+ ptabs r7, tr1
+ ptabs r8, tr2
+ ptabs r9, tr3
+ ld.q SP, FRAME_T(4), r6
+ ld.q SP, FRAME_T(5), r7
+ ld.q SP, FRAME_T(6), r8
+ ld.q SP, FRAME_T(7), r9
+ ptabs r6, tr4
+ ptabs r7, tr5
+ ptabs r8, tr6
+ ptabs r9, tr7
+
+ ld.q SP, FRAME_R(0), r0
+ ld.q SP, FRAME_R(1), r1
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+ ld.q SP, FRAME_R(8), r8
+ ld.q SP, FRAME_R(9), r9
+ ld.q SP, FRAME_R(10), r10
+ ld.q SP, FRAME_R(11), r11
+ ld.q SP, FRAME_R(12), r12
+ ld.q SP, FRAME_R(13), r13
+ ld.q SP, FRAME_R(14), r14
+
+ ld.q SP, FRAME_R(16), r16
+ ld.q SP, FRAME_R(17), r17
+ ld.q SP, FRAME_R(18), r18
+ ld.q SP, FRAME_R(19), r19
+ ld.q SP, FRAME_R(20), r20
+ ld.q SP, FRAME_R(21), r21
+ ld.q SP, FRAME_R(22), r22
+ ld.q SP, FRAME_R(23), r23
+ ld.q SP, FRAME_R(24), r24
+ ld.q SP, FRAME_R(25), r25
+ ld.q SP, FRAME_R(26), r26
+ ld.q SP, FRAME_R(27), r27
+ ld.q SP, FRAME_R(28), r28
+ ld.q SP, FRAME_R(29), r29
+ ld.q SP, FRAME_R(30), r30
+ ld.q SP, FRAME_R(31), r31
+ ld.q SP, FRAME_R(32), r32
+ ld.q SP, FRAME_R(33), r33
+ ld.q SP, FRAME_R(34), r34
+ ld.q SP, FRAME_R(35), r35
+ ld.q SP, FRAME_R(36), r36
+ ld.q SP, FRAME_R(37), r37
+ ld.q SP, FRAME_R(38), r38
+ ld.q SP, FRAME_R(39), r39
+ ld.q SP, FRAME_R(40), r40
+ ld.q SP, FRAME_R(41), r41
+ ld.q SP, FRAME_R(42), r42
+ ld.q SP, FRAME_R(43), r43
+ ld.q SP, FRAME_R(44), r44
+ ld.q SP, FRAME_R(45), r45
+ ld.q SP, FRAME_R(46), r46
+ ld.q SP, FRAME_R(47), r47
+ ld.q SP, FRAME_R(48), r48
+ ld.q SP, FRAME_R(49), r49
+ ld.q SP, FRAME_R(50), r50
+ ld.q SP, FRAME_R(51), r51
+ ld.q SP, FRAME_R(52), r52
+ ld.q SP, FRAME_R(53), r53
+ ld.q SP, FRAME_R(54), r54
+ ld.q SP, FRAME_R(55), r55
+ ld.q SP, FRAME_R(56), r56
+ ld.q SP, FRAME_R(57), r57
+ ld.q SP, FRAME_R(58), r58
+
+ getcon SR, r59
+ movi SR_BLOCK_EXC, r60
+ or r59, r60, r59
+ putcon r59, SR /* SR.BL = 1, keep nesting out */
+ ld.q SP, FRAME_S(FSSR), r61
+ ld.q SP, FRAME_S(FSPC), r62
+ movi SR_ASID_MASK, r60
+ and r59, r60, r59
+ andc r61, r60, r61 /* Clear out older ASID */
+ or r59, r61, r61 /* Retain current ASID */
+ putcon r61, SSR
+ putcon r62, SPC
+
+ /* Ignore FSYSCALL_ID */
+
+ ld.q SP, FRAME_R(59), r59
+ ld.q SP, FRAME_R(60), r60
+ ld.q SP, FRAME_R(61), r61
+ ld.q SP, FRAME_R(62), r62
+
+ /* Last touch */
+ ld.q SP, FRAME_R(15), SP
+ rte
+ nop
+
+/*
+ * Third level handlers for VBR-based exceptions. Adapting args to
+ * and/or deflecting to fourth level handlers.
+ *
+ * Fourth level handlers interface.
+ * Most are C-coded handlers directly pointed by the trap_jtable.
+ * (Third = Fourth level)
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
+ * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
+ * (r3) struct pt_regs *, original register's frame pointer
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
+ * (r5) TRA control register (for syscall/debug benefit only)
+ * (LINK) return address
+ * (SP) = r3
+ *
+ * Kernel TLB fault handlers will get a slightly different interface.
+ * (r2) struct pt_regs *, original register's frame pointer
+ * (r3) writeaccess, whether it's a store fault as opposed to load fault
+ * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
+ * (r5) Effective Address of fault
+ * (LINK) return address
+ * (SP) = r2
+ *
+ * fpu_error_or_IRQ? is a helper to deflect to the right cause.
+ *
+ */
+tlb_miss_load:
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+tlb_miss_store:
+ or SP, ZERO, r2
+ movi 1, r3 /* Write */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+itlb_miss_or_IRQ:
+ pta its_IRQ, tr0
+ beqi/u r4, EVENT_INTERRUPT, tr0
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ movi 1, r4 /* Text */
+ getcon TEA, r5
+ /* Fall through */
+
+call_do_page_fault:
+ movi do_page_fault, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+fpu_error_or_IRQA:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi do_fpu_state_restore, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+fpu_error_or_IRQB:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi do_fpu_state_restore, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+its_IRQ:
+ movi do_IRQ, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+/*
+ * system_call/unknown_trap third level handler:
+ *
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (TRAP = 11)
+ * (r3) struct pt_regs *, original register's frame pointer
+ * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
+ * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
+ * (SP) = r3
+ * (LINK) return address: ret_from_exception
+ * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
+ *
+ * Outputs:
+ * (*r3) Syscall reply (Saved r2)
+ * (LINK) In case of syscall only it can be scrapped.
+ * Common second level post handler will be ret_from_syscall.
+ * Common (non-trace) exit point to that is syscall_ret (saving
+ * result to r2). Common bad exit point is syscall_bad (returning
+ * ENOSYS then saved to r2).
+ *
+ */
+
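For reference, the TRA decode that system_call performs below, restated in C (a sketch under the 0x00xyzzzz layout from the comment above; decode_tra is a hypothetical helper, not part of the patch):

	/* Hypothetical decoder matching system_call's checks below. */
	struct tra_fields {
		unsigned int nr;	/* z field: syscall number (TRA & 0x1ff)  */
		unsigned int nargs;	/* y field: argument count                */
		int is_syscall;		/* x field must be 1, i.e. TRA = 0x1yzzzz */
	};

	static struct tra_fields decode_tra(unsigned long tra)
	{
		struct tra_fields f;

		f.nr         = tra & 0x1ff;		/* andi r5, 0x1ff, r5   */
		f.nargs      = (tra >> 16) & 0xf;
		f.is_syscall = ((tra >> 20) == 1);	/* shlri r4, 20; bnei 1 */
		return f;
	}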
+unknown_trap:
+ /* Unknown Trap or User Trace */
+ movi do_unknown_trapa, r6
+ ptabs r6, tr0
+ ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
+ andi r2, 0x1ff, r2 /* r2 = syscall # */
+ blink tr0, LINK
+
+ pta syscall_ret, tr0
+ blink tr0, ZERO
+
+	/* New syscall implementation */
+system_call:
+ pta unknown_trap, tr0
+ or r5, ZERO, r4 /* TRA (=r5) -> r4 */
+ shlri r4, 20, r4
+ bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
+
+ /* It's a system call */
+ st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
+ andi r5, 0x1ff, r5 /* syscall # -> r5 */
+
+ STI()
+
+ pta syscall_allowed, tr0
+ movi NR_syscalls - 1, r4 /* Last valid */
+ bgeu/l r4, r5, tr0
+
+syscall_bad:
+ /* Return ENOSYS ! */
+ movi -(ENOSYS), r2 /* Fall-through */
+
+ .global syscall_ret
+syscall_ret:
+ st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* nothing useful in registers at this point */
+
+ movi evt_debug2, r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ ld.q SP, FRAME_R(9), r2
+ or SP, ZERO, r3
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+
+/* A different return path for ret_from_fork, because with later kernels we
+ * now need to call schedule_tail. Since prev is already loaded into r2 by
+ * switch_to(), we can call it straight away.
+ */
+
+.global ret_from_fork
+ret_from_fork:
+
+ movi schedule_tail,r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ blink tr0, LINK
+
+#ifdef CONFIG_POOR_MANS_STRACE
+ /* nothing useful in registers at this point */
+
+ movi evt_debug2, r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ ld.q SP, FRAME_R(9), r2
+ or SP, ZERO, r3
+ blink tr0, LINK
+#endif
+
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+
+
+syscall_allowed:
+ /* Use LINK to deflect the exit point, default is syscall_ret */
+ pta syscall_ret, tr0
+ gettr tr0, LINK
+ pta syscall_notrace, tr0
+
+ getcon KCR0, r2
+ ld.l r2, TI_FLAGS, r4
+ movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
+ and r6, r4, r6
+ beq/l r6, ZERO, tr0
+
+ /* Trace it by calling syscall_trace before and after */
+ movi syscall_trace, r4
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3
+ ptabs r4, tr0
+ blink tr0, LINK
+
+ /* Reload syscall number as r5 is trashed by syscall_trace */
+ ld.q SP, FRAME_S(FSYSCALL_ID), r5
+ andi r5, 0x1ff, r5
+
+ pta syscall_ret_trace, tr0
+ gettr tr0, LINK
+
+syscall_notrace:
+ /* Now point to the appropriate 4th level syscall handler */
+ movi sys_call_table, r4
+ shlli r5, 2, r5
+ ldx.l r4, r5, r5
+ ptabs r5, tr0
+
+ /* Prepare original args */
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+
+ /* And now the trick for those syscalls requiring regs * ! */
+ or SP, ZERO, r8
+
+ /* Call it */
+ blink tr0, ZERO /* LINK is already properly set */
+
+syscall_ret_trace:
+ /* We get back here only if under trace */
+ st.q SP, FRAME_R(9), r2 /* Save return value */
+
+ movi syscall_trace, LINK
+ or SP, ZERO, r2
+ movi 1, r3
+ ptabs LINK, tr0
+ blink tr0, LINK
+
+ /* This needs to be done after any syscall tracing */
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO /* Resume normal return sequence */
+
+/*
+ * --- Switch to running under a particular ASID and return the previous ASID value
+ * --- The caller is assumed to have done a cli before calling this.
+ *
+ * Input r2 : new ASID
+ * Output r2 : old ASID
+ */
+
+ .global switch_and_save_asid
+switch_and_save_asid:
+ getcon sr, r0
+ movi 255, r4
+ shlli r4, 16, r4 /* r4 = mask to select ASID */
+ and r0, r4, r3 /* r3 = shifted old ASID */
+ andi r2, 255, r2 /* mask down new ASID */
+ shlli r2, 16, r2 /* align new ASID against SR.ASID */
+ andc r0, r4, r0 /* efface old ASID from SR */
+ or r0, r2, r0 /* insert the new ASID */
+ putcon r0, ssr
+ movi 1f, r0
+ putcon r0, spc
+ rte
+ nop
+1:
+ ptabs LINK, tr0
+ shlri r3, 16, r2 /* r2 = old ASID */
+ blink tr0, r63
+
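In C terms the SR manipulation above amounts to the following (a model sketch only; the real code has to route the new value through SSR/SPC and an rte for it to take effect, and model_switch_asid is hypothetical):

	/* Hypothetical C model of switch_and_save_asid; SR.ASID is bits 16-23. */
	static unsigned long model_switch_asid(unsigned long *sr, unsigned long new_asid)
	{
		unsigned long old_asid = (*sr >> 16) & 0xff;

		*sr = (*sr & ~0x00ff0000UL) | ((new_asid & 0xff) << 16);
		return old_asid;
	}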
+ .global route_to_panic_handler
+route_to_panic_handler:
+ /* Switch to real mode, goto panic_handler, don't return. Useful for
+ last-chance debugging, e.g. if no output wants to go to the console.
+ */
+
+ movi panic_handler - CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ pta 1f, tr1
+ gettr tr1, r0
+ putcon r0, spc
+ getcon sr, r0
+ movi 1, r1
+ shlli r1, 31, r1
+ andc r0, r1, r0
+ putcon r0, ssr
+ rte
+ nop
+1: /* Now in real mode */
+ blink tr0, r63
+ nop
+
+ .global peek_real_address_q
+peek_real_address_q:
+	/* One arg, one result:
+	   r2 (in)  : real mode address to peek
+	   r2 (out) : result quadword
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to onchip_remap the debug
+ module, and to avoid the need to onchip_remap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+ or r0, r1, r1 /* r0 with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.peek0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+ /* Here's the actual peek. If the address is bad, all bets are now off
+ * what will happen (handlers invoked in real-mode = bad news) */
+ ld.q r2, 0, r2
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
+
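Usage note: probe.c later in this same patch relies on exactly this routine to read the CIR without setting up a mapping:

	unsigned long long cir = peek_real_address_q(0x0d000008);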
+ .global poke_real_address_q
+poke_real_address_q:
+ /* Two args:
+ r2 : real mode address to poke
+ r3 : quadword value to write.
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to onchip_remap the debug
+ module, and to avoid the need to onchip_remap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+ or r0, r1, r1 /* r0 with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.poke0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+ /* Here's the actual poke. If the address is bad, all bets are now off
+ * what will happen (handlers invoked in real-mode = bad news) */
+ st.q r2, 0, r3
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
+
+/*
+ * --- User Access Handling Section
+ */
+
+/*
+ * User access support. It has all moved to non-inlined assembler
+ * functions here.
+ *
+ * __kernel_size_t __copy_user(void *__to, const void *__from,
+ * __kernel_size_t __n)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) size in bytes
+ *
+ * Outputs:
+ * (*r2) target data
+ * (r2) non-copied bytes
+ *
+ * If a fault occurs on the user pointer, bail out early and return the
+ * number of bytes not copied in r2.
+ * Strategy : for large blocks, call a real memcpy function which can
+ * move >1 byte at a time using unaligned ld/st instructions, and can
+ * manipulate the cache using prefetch + alloco to improve the speed
+ * further. If a fault occurs in that function, just revert to the
+ * byte-by-byte approach used for small blocks; this is rare so the
+ * performance hit for that case does not matter.
+ *
+ * For small blocks it's not worth the overhead of setting up and calling
+ * the memcpy routine; do the copy a byte at a time.
+ *
+ */
+ .global __copy_user
+__copy_user:
+ pta __copy_user_byte_by_byte, tr1
+ movi 16, r0 ! this value is a best guess, should tune it by benchmarking
+ bge/u r0, r4, tr1
+ pta copy_user_memcpy, tr0
+ addi SP, -32, SP
+ /* Save arguments in case we have to fix-up unhandled page fault */
+ st.q SP, 0, r2
+ st.q SP, 8, r3
+ st.q SP, 16, r4
+ st.q SP, 24, r35 ! r35 is callee-save
+ /* Save LINK in a register to reduce RTS time later (otherwise
+ ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
+ ori LINK, 0, r35
+ blink tr0, LINK
+
+ /* Copy completed normally if we get back here */
+ ptabs r35, tr0
+ ld.q SP, 24, r35
+ /* don't restore r2-r4, pointless */
+ /* set result=r2 to zero as the copy must have succeeded. */
+ or r63, r63, r2
+ addi SP, 32, SP
+ blink tr0, r63 ! RTS
+
+ .global __copy_user_fixup
+__copy_user_fixup:
+ /* Restore stack frame */
+ ori r35, 0, LINK
+ ld.q SP, 24, r35
+ ld.q SP, 16, r4
+ ld.q SP, 8, r3
+ ld.q SP, 0, r2
+ addi SP, 32, SP
+ /* Fall through to original code, in the 'same' state we entered with */
+
+/* The slow byte-by-byte method is used if the fast copy traps due to a bad
+ user address. In that rare case, the speed drop can be tolerated. */
+__copy_user_byte_by_byte:
+ pta ___copy_user_exit, tr1
+ pta ___copy_user1, tr0
+ beq/u r4, r63, tr1 /* early exit for zero length copy */
+ sub r2, r3, r0
+ addi r0, -1, r0
+
+___copy_user1:
+ ld.b r3, 0, r5 /* Fault address 1 */
+
+ /* Could rewrite this to use just 1 add, but the second comes 'free'
+ due to load latency */
+ addi r3, 1, r3
+ addi r4, -1, r4 /* No real fixup required */
+___copy_user2:
+ stx.b r3, r0, r5 /* Fault address 2 */
+ bne r4, ZERO, tr0
+
+___copy_user_exit:
+ or r4, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
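The two-tier strategy described above, restated as C pseudo-code (a sketch only: unsigned long stands in for __kernel_size_t, copy_user_memcpy is modelled as returning 0 on success, the byte loop's early fault exit really happens via the __ex_table fixups, and copy_user_sketch itself is hypothetical):

	/* Hypothetical outline of __copy_user's strategy. */
	extern int copy_user_memcpy(char *to, const char *from, unsigned long n);

	unsigned long copy_user_sketch(char *to, const char *from, unsigned long n)
	{
		if (n > 16) {	/* threshold from the "movi 16, r0" above */
			if (copy_user_memcpy(to, from, n) == 0)
				return 0;	/* fast path copied everything */
			/* fast path faulted: retry byte by byte with original args */
		}
		while (n) {	/* a fault here exits early via the fixup table */
			*to++ = *from++;
			n--;
		}
		return n;	/* number of bytes NOT copied */
	}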
+/*
+ * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) size in bytes
+ *
+ * Outputs:
+ * (*r2) zero-ed target data
+ * (r2) non-zero-ed bytes
+ */
+ .global __clear_user
+__clear_user:
+ pta ___clear_user_exit, tr1
+ pta ___clear_user1, tr0
+ beq/u r3, r63, tr1
+
+___clear_user1:
+ st.b r2, 0, ZERO /* Fault address */
+ addi r2, 1, r2
+ addi r3, -1, r3 /* No real fixup required */
+ bne r3, ZERO, tr0
+
+___clear_user_exit:
+ or r3, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+/*
+ * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
+ * int __count)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) maximum size in bytes
+ *
+ * Outputs:
+ * (*r2) copied data
+ * (r2) -EFAULT (in case of faulting)
+ * copied data (otherwise)
+ */
+ .global __strncpy_from_user
+__strncpy_from_user:
+ pta ___strncpy_from_user1, tr0
+ pta ___strncpy_from_user_done, tr1
+ or r4, ZERO, r5 /* r5 = original count */
+ beq/u r4, r63, tr1 /* early exit if r4==0 */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+
+___strncpy_from_user1:
+ ld.b r3, 0, r7 /* Fault address: only in reading */
+ st.b r2, 0, r7
+ addi r2, 1, r2
+ addi r3, 1, r3
+ beq/u ZERO, r7, tr1
+ addi r4, -1, r4 /* return real number of copied bytes */
+ bne/l ZERO, r4, tr0
+
+___strncpy_from_user_done:
+ sub r5, r4, r6 /* If done, return copied */
+
+___strncpy_from_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __strnlen_user(const char *__s, long __n)
+ *
+ * Inputs:
+ * (r2) source address
+ * (r3) source size in bytes
+ *
+ * Outputs:
+ * (r2) -EFAULT (in case of faulting)
+ * string length (otherwise)
+ */
+ .global __strnlen_user
+__strnlen_user:
+ pta ___strnlen_user_set_reply, tr0
+ pta ___strnlen_user1, tr1
+ or ZERO, ZERO, r5 /* r5 = counter */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+ beq r3, ZERO, tr0
+
+___strnlen_user1:
+ ldx.b r2, r5, r7 /* Fault address: only in reading */
+ addi r3, -1, r3 /* No real fixup */
+ addi r5, 1, r5
+ beq r3, ZERO, tr0
+ bne r7, ZERO, tr1
+! The line below used to be active. It led to a junk byte lying between each pair
+! of entries in the argv & envp structures in memory. Whilst the program saw the right data
+! via the argv and envp arguments to main, it meant the 'flat' representation visible through
+! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
+! addi r5, 1, r5 /* Include '\0' */
+
+___strnlen_user_set_reply:
+ or r5, ZERO, r6 /* If done, return counter */
+
+___strnlen_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __get_user_asm_?(void *val, long addr)
+ *
+ * Inputs:
+ * (r2) dest address
+ * (r3) source address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __get_user_asm_b
+__get_user_asm_b:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_b1:
+ ld.b r3, 0, r5 /* r5 = data */
+ st.b r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_w
+__get_user_asm_w:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_w1:
+ ld.w r3, 0, r5 /* r5 = data */
+ st.w r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_l
+__get_user_asm_l:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_l1:
+ ld.l r3, 0, r5 /* r5 = data */
+ st.l r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_q
+__get_user_asm_q:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_q1:
+ ld.q r3, 0, r5 /* r5 = data */
+ st.q r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __put_user_asm_?(void *pval, long addr)
+ *
+ * Inputs:
+ * (r2) kernel pointer to value
+ * (r3) dest address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __put_user_asm_b
+__put_user_asm_b:
+ ld.b r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_b1:
+ st.b r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_w
+__put_user_asm_w:
+ ld.w r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_w1:
+ st.w r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_l
+__put_user_asm_l:
+ ld.l r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_l1:
+ st.l r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_q
+__put_user_asm_q:
+ ld.q r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_q1:
+ st.q r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+panic_stash_regs:
+	/* The idea is: when we get an unhandled panic, we dump the registers
+	   to a known memory location, then just sit in a tight loop.
+ This allows the human to look at the memory region through the GDB
+ session (assuming the debug module's SHwy initiator isn't locked up
+ or anything), to hopefully analyze the cause of the panic. */
+
+ /* On entry, former r15 (SP) is in DCR
+	   former r0 is at resvec_save_area + 0
+	   former r1 is at resvec_save_area + 8
+	   former tr0 is at resvec_save_area + 32
+ DCR is the only register whose value is lost altogether.
+ */
+
+ movi 0xffffffff80000000, r0 ! phy of dump area
+ ld.q SP, 0x000, r1 ! former r0
+ st.q r0, 0x000, r1
+ ld.q SP, 0x008, r1 ! former r1
+ st.q r0, 0x008, r1
+ st.q r0, 0x010, r2
+ st.q r0, 0x018, r3
+ st.q r0, 0x020, r4
+ st.q r0, 0x028, r5
+ st.q r0, 0x030, r6
+ st.q r0, 0x038, r7
+ st.q r0, 0x040, r8
+ st.q r0, 0x048, r9
+ st.q r0, 0x050, r10
+ st.q r0, 0x058, r11
+ st.q r0, 0x060, r12
+ st.q r0, 0x068, r13
+ st.q r0, 0x070, r14
+ getcon dcr, r14
+ st.q r0, 0x078, r14
+ st.q r0, 0x080, r16
+ st.q r0, 0x088, r17
+ st.q r0, 0x090, r18
+ st.q r0, 0x098, r19
+ st.q r0, 0x0a0, r20
+ st.q r0, 0x0a8, r21
+ st.q r0, 0x0b0, r22
+ st.q r0, 0x0b8, r23
+ st.q r0, 0x0c0, r24
+ st.q r0, 0x0c8, r25
+ st.q r0, 0x0d0, r26
+ st.q r0, 0x0d8, r27
+ st.q r0, 0x0e0, r28
+ st.q r0, 0x0e8, r29
+ st.q r0, 0x0f0, r30
+ st.q r0, 0x0f8, r31
+ st.q r0, 0x100, r32
+ st.q r0, 0x108, r33
+ st.q r0, 0x110, r34
+ st.q r0, 0x118, r35
+ st.q r0, 0x120, r36
+ st.q r0, 0x128, r37
+ st.q r0, 0x130, r38
+ st.q r0, 0x138, r39
+ st.q r0, 0x140, r40
+ st.q r0, 0x148, r41
+ st.q r0, 0x150, r42
+ st.q r0, 0x158, r43
+ st.q r0, 0x160, r44
+ st.q r0, 0x168, r45
+ st.q r0, 0x170, r46
+ st.q r0, 0x178, r47
+ st.q r0, 0x180, r48
+ st.q r0, 0x188, r49
+ st.q r0, 0x190, r50
+ st.q r0, 0x198, r51
+ st.q r0, 0x1a0, r52
+ st.q r0, 0x1a8, r53
+ st.q r0, 0x1b0, r54
+ st.q r0, 0x1b8, r55
+ st.q r0, 0x1c0, r56
+ st.q r0, 0x1c8, r57
+ st.q r0, 0x1d0, r58
+ st.q r0, 0x1d8, r59
+ st.q r0, 0x1e0, r60
+ st.q r0, 0x1e8, r61
+ st.q r0, 0x1f0, r62
+ st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
+
+ ld.q SP, 0x020, r1 ! former tr0
+ st.q r0, 0x200, r1
+ gettr tr1, r1
+ st.q r0, 0x208, r1
+ gettr tr2, r1
+ st.q r0, 0x210, r1
+ gettr tr3, r1
+ st.q r0, 0x218, r1
+ gettr tr4, r1
+ st.q r0, 0x220, r1
+ gettr tr5, r1
+ st.q r0, 0x228, r1
+ gettr tr6, r1
+ st.q r0, 0x230, r1
+ gettr tr7, r1
+ st.q r0, 0x238, r1
+
+ getcon sr, r1
+ getcon ssr, r2
+ getcon pssr, r3
+ getcon spc, r4
+ getcon pspc, r5
+ getcon intevt, r6
+ getcon expevt, r7
+ getcon pexpevt, r8
+ getcon tra, r9
+ getcon tea, r10
+ getcon kcr0, r11
+ getcon kcr1, r12
+ getcon vbr, r13
+ getcon resvec, r14
+
+ st.q r0, 0x240, r1
+ st.q r0, 0x248, r2
+ st.q r0, 0x250, r3
+ st.q r0, 0x258, r4
+ st.q r0, 0x260, r5
+ st.q r0, 0x268, r6
+ st.q r0, 0x270, r7
+ st.q r0, 0x278, r8
+ st.q r0, 0x280, r9
+ st.q r0, 0x288, r10
+ st.q r0, 0x290, r11
+ st.q r0, 0x298, r12
+ st.q r0, 0x2a0, r13
+ st.q r0, 0x2a8, r14
+
+ getcon SPC,r2
+ getcon SSR,r3
+ getcon EXPEVT,r4
+ /* Prepare to jump to C - physical address */
+ movi panic_handler-CONFIG_PAGE_OFFSET, r1
+ ori r1, 1, r1
+ ptabs r1, tr0
+ getcon DCR, SP
+ blink tr0, ZERO
+ nop
+ nop
+ nop
+ nop
+
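For whoever ends up inspecting the dump through the debugger: the stores above lay it out, relative to the physical base 0xffffffff80000000, as r0-r63 at offsets 0x000-0x1f8, tr0-tr7 at 0x200-0x238, then sr, ssr, pssr, spc, pspc, intevt, expevt, pexpevt, tra, tea, kcr0, kcr1, vbr and resvec at 0x240-0x2a8.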
+
+
+
+/*
+ * --- Signal Handling Section
+ */
+
+/*
+ * extern long long _sa_default_rt_restorer
+ * extern long long _sa_default_restorer
+ *
+ * or, better,
+ *
+ * extern void _sa_default_rt_restorer(void)
+ * extern void _sa_default_restorer(void)
+ *
+ * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
+ * from user space. Copied into user space by signal management.
+ * Both must be quad aligned and 2 quad long (4 instructions).
+ *
+ */
+ .balign 8
+ .global sa_default_rt_restorer
+sa_default_rt_restorer:
+ movi 0x10, r9
+ shori __NR_rt_sigreturn, r9
+ trapa r9
+ nop
+
+ .balign 8
+ .global sa_default_restorer
+sa_default_restorer:
+ movi 0x10, r9
+ shori __NR_sigreturn, r9
+ trapa r9
+ nop
+
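The movi/shori pairs above build the 0x1yzzzz-style trap value described in the system_call comment earlier: shori shifts r9 left by 16 and ORs in the immediate, so after "movi 0x10, r9; shori __NR_rt_sigreturn, r9" we have r9 = (0x10 << 16) | __NR_rt_sigreturn, i.e. the x nibble is 1 (syscall) and the low bits carry the syscall number.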
+/*
+ * --- __ex_table Section
+ */
+
+/*
+ * User Access Exception Table.
+ */
+ .section __ex_table, "a"
+
+ .global asm_uaccess_start /* Just a marker */
+asm_uaccess_start:
+
+ .long ___copy_user1, ___copy_user_exit
+ .long ___copy_user2, ___copy_user_exit
+ .long ___clear_user1, ___clear_user_exit
+ .long ___strncpy_from_user1, ___strncpy_from_user_exit
+ .long ___strnlen_user1, ___strnlen_user_exit
+ .long ___get_user_asm_b1, ___get_user_asm_b_exit
+ .long ___get_user_asm_w1, ___get_user_asm_w_exit
+ .long ___get_user_asm_l1, ___get_user_asm_l_exit
+ .long ___get_user_asm_q1, ___get_user_asm_q_exit
+ .long ___put_user_asm_b1, ___put_user_asm_b_exit
+ .long ___put_user_asm_w1, ___put_user_asm_w_exit
+ .long ___put_user_asm_l1, ___put_user_asm_l_exit
+ .long ___put_user_asm_q1, ___put_user_asm_q_exit
+
+ .global asm_uaccess_end /* Just a marker */
+asm_uaccess_end:
+
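Each pair above maps a potentially-faulting instruction to its recovery point. Conceptually the fault path consults the table like this (a sketch; search_uaccess_fixup is hypothetical and the real lookup lives in the generic fault-handling code):

	/* Hypothetical model of the exception-table fixup lookup. */
	struct uaccess_fixup_entry {
		unsigned int insn;	/* address of the faulting instruction (.long) */
		unsigned int fixup;	/* address execution resumes at (.long)        */
	};

	extern struct uaccess_fixup_entry asm_uaccess_start[], asm_uaccess_end[];

	static unsigned int search_uaccess_fixup(unsigned int faulting_pc)
	{
		struct uaccess_fixup_entry *e;

		for (e = asm_uaccess_start; e < asm_uaccess_end; e++)
			if (e->insn == faulting_pc)
				return e->fixup;	/* resume at the *_exit label */
		return 0;				/* genuine fault, no fixup    */
	}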
+
+
+
+/*
+ * --- .text.init Section
+ */
+
+ .section .text.init, "ax"
+
+/*
+ * void trap_init (void)
+ *
+ */
+ .global trap_init
+trap_init:
+ addi SP, -24, SP /* Room to save r28/r29/r30 */
+ st.q SP, 0, r28
+ st.q SP, 8, r29
+ st.q SP, 16, r30
+
+ /* Set VBR and RESVEC */
+ movi LVBR_block, r19
+ andi r19, -4, r19 /* reset MMUOFF + reserved */
+ /* For RESVEC exceptions we force the MMU off, which means we need the
+ physical address. */
+ movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
+ andi r20, -4, r20 /* reset reserved */
+ ori r20, 1, r20 /* set MMUOFF */
+ putcon r19, VBR
+ putcon r20, RESVEC
+
+ /* Sanity check */
+ movi LVBR_block_end, r21
+ andi r21, -4, r21
+ movi BLOCK_SIZE, r29 /* r29 = expected size */
+ or r19, ZERO, r30
+ add r19, r29, r19
+
+ /*
+ * Ugly, but better loop forever now than crash afterwards.
+ * We should print a message, but if we touch LVBR or
+ * LRESVEC blocks we should not be surprised if we get stuck
+ * in trap_init().
+ */
+ pta trap_init_loop, tr1
+ gettr tr1, r28 /* r28 = trap_init_loop */
+ sub r21, r30, r30 /* r30 = actual size */
+
+ /*
+ * VBR/RESVEC handlers overlap by being bigger than
+ * allowed. Very bad. Just loop forever.
+ * (r28) panic/loop address
+ * (r29) expected size
+ * (r30) actual size
+ */
+trap_init_loop:
+ bne r19, r21, tr1
+
+ /* Now that exception vectors are set up reset SR.BL */
+ getcon SR, r22
+ movi SR_UNBLOCK_EXC, r23
+ and r22, r23, r22
+ putcon r22, SR
+
+ addi SP, 24, SP
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
new file mode 100644
index 00000000000..30b76a94abf
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -0,0 +1,166 @@
+/*
+ * arch/sh/kernel/cpu/sh5/fpu.c
+ *
+ * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
+ * Copyright (C) 2002 STMicroelectronics Limited
+ * Author : Stuart Menefy
+ *
+ * Started from SH4 version:
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/user.h>
+#include <asm/io.h>
+
+/*
+ * Initially load the FPU with signalling NaNs. This bit pattern
+ * has the property that no matter whether considered as single or as
+ * double precision, it still represents a signalling NaN.
+ */
+#define sNAN64 0xFFFFFFFFFFFFFFFFULL
+#define sNAN32 0xFFFFFFFFUL
+
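A quick way to convince yourself of the "NaN either way" property (a sketch; note that whether an all-ones pattern traps as signalling or is treated as quiet depends on the FPU's NaN convention, so this only verifies NaN-ness):

	#include <assert.h>
	#include <math.h>
	#include <stdint.h>
	#include <string.h>

	static void check_nan_patterns(void)
	{
		uint64_t q = 0xFFFFFFFFFFFFFFFFULL;	/* sNAN64 */
		uint32_t l = 0xFFFFFFFFUL;		/* sNAN32 */
		double d;
		float f;

		memcpy(&d, &q, sizeof(d));
		memcpy(&f, &l, sizeof(f));
		assert(isnan(d) && isnan(f));	/* exponent all ones, mantissa non-zero */
	}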
+static union sh_fpu_union init_fpuregs = {
+ .hard = {
+ .fp_regs = { [0 ... 63] = sNAN32 },
+ .fpscr = FPSCR_INIT
+ }
+};
+
+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+{
+ asm volatile("fst.p %0, (0*8), fp0\n\t"
+ "fst.p %0, (1*8), fp2\n\t"
+ "fst.p %0, (2*8), fp4\n\t"
+ "fst.p %0, (3*8), fp6\n\t"
+ "fst.p %0, (4*8), fp8\n\t"
+ "fst.p %0, (5*8), fp10\n\t"
+ "fst.p %0, (6*8), fp12\n\t"
+ "fst.p %0, (7*8), fp14\n\t"
+ "fst.p %0, (8*8), fp16\n\t"
+ "fst.p %0, (9*8), fp18\n\t"
+ "fst.p %0, (10*8), fp20\n\t"
+ "fst.p %0, (11*8), fp22\n\t"
+ "fst.p %0, (12*8), fp24\n\t"
+ "fst.p %0, (13*8), fp26\n\t"
+ "fst.p %0, (14*8), fp28\n\t"
+ "fst.p %0, (15*8), fp30\n\t"
+ "fst.p %0, (16*8), fp32\n\t"
+ "fst.p %0, (17*8), fp34\n\t"
+ "fst.p %0, (18*8), fp36\n\t"
+ "fst.p %0, (19*8), fp38\n\t"
+ "fst.p %0, (20*8), fp40\n\t"
+ "fst.p %0, (21*8), fp42\n\t"
+ "fst.p %0, (22*8), fp44\n\t"
+ "fst.p %0, (23*8), fp46\n\t"
+ "fst.p %0, (24*8), fp48\n\t"
+ "fst.p %0, (25*8), fp50\n\t"
+ "fst.p %0, (26*8), fp52\n\t"
+ "fst.p %0, (27*8), fp54\n\t"
+ "fst.p %0, (28*8), fp56\n\t"
+ "fst.p %0, (29*8), fp58\n\t"
+ "fst.p %0, (30*8), fp60\n\t"
+ "fst.p %0, (31*8), fp62\n\t"
+
+ "fgetscr fr63\n\t"
+ "fst.s %0, (32*8), fr63\n\t"
+ : /* no output */
+ : "r" (&tsk->thread.fpu.hard)
+ : "memory");
+}
+
+static inline void
+fpload(struct sh_fpu_hard_struct *fpregs)
+{
+ asm volatile("fld.p %0, (0*8), fp0\n\t"
+ "fld.p %0, (1*8), fp2\n\t"
+ "fld.p %0, (2*8), fp4\n\t"
+ "fld.p %0, (3*8), fp6\n\t"
+ "fld.p %0, (4*8), fp8\n\t"
+ "fld.p %0, (5*8), fp10\n\t"
+ "fld.p %0, (6*8), fp12\n\t"
+ "fld.p %0, (7*8), fp14\n\t"
+ "fld.p %0, (8*8), fp16\n\t"
+ "fld.p %0, (9*8), fp18\n\t"
+ "fld.p %0, (10*8), fp20\n\t"
+ "fld.p %0, (11*8), fp22\n\t"
+ "fld.p %0, (12*8), fp24\n\t"
+ "fld.p %0, (13*8), fp26\n\t"
+ "fld.p %0, (14*8), fp28\n\t"
+ "fld.p %0, (15*8), fp30\n\t"
+ "fld.p %0, (16*8), fp32\n\t"
+ "fld.p %0, (17*8), fp34\n\t"
+ "fld.p %0, (18*8), fp36\n\t"
+ "fld.p %0, (19*8), fp38\n\t"
+ "fld.p %0, (20*8), fp40\n\t"
+ "fld.p %0, (21*8), fp42\n\t"
+ "fld.p %0, (22*8), fp44\n\t"
+ "fld.p %0, (23*8), fp46\n\t"
+ "fld.p %0, (24*8), fp48\n\t"
+ "fld.p %0, (25*8), fp50\n\t"
+ "fld.p %0, (26*8), fp52\n\t"
+ "fld.p %0, (27*8), fp54\n\t"
+ "fld.p %0, (28*8), fp56\n\t"
+ "fld.p %0, (29*8), fp58\n\t"
+ "fld.p %0, (30*8), fp60\n\t"
+
+ "fld.s %0, (32*8), fr63\n\t"
+ "fputscr fr63\n\t"
+
+ "fld.p %0, (31*8), fp62\n\t"
+ : /* no output */
+ : "r" (fpregs) );
+}
+
+void fpinit(struct sh_fpu_hard_struct *fpregs)
+{
+ *fpregs = init_fpuregs.hard;
+}
+
+asmlinkage void
+do_fpu_error(unsigned long ex, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ regs->pc += 4;
+
+ tsk->thread.trap_no = 11;
+ tsk->thread.error_code = 0;
+ force_sig(SIGFPE, tsk);
+}
+
+
+asmlinkage void
+do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
+{
+ void die(const char *str, struct pt_regs *regs, long err);
+
+	if (!user_mode(regs))
+ die("FPU used in kernel", regs, ex);
+
+ regs->sr &= ~SR_FD;
+
+ if (last_task_used_math == current)
+ return;
+
+ enable_fpu();
+ if (last_task_used_math != NULL)
+		/* Another process's FPU state: save it away */
+ save_fpu(last_task_used_math, regs);
+
+ last_task_used_math = current;
+ if (used_math()) {
+ fpload(&current->thread.fpu.hard);
+ } else {
+ /* First time FPU user. */
+ fpload(&init_fpuregs.hard);
+ set_used_math();
+ }
+ disable_fpu();
+}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
new file mode 100644
index 00000000000..15d167fd0ae
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -0,0 +1,76 @@
+/*
+ * arch/sh/kernel/cpu/sh5/probe.c
+ *
+ * CPU Subtype Probing for SH-5.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+int __init detect_cpu_and_cache_system(void)
+{
+ unsigned long long cir;
+
+ /* Do peeks in real mode to avoid having to set up a mapping for the
+ WPC registers. On SH5-101 cut2, such a mapping would be exposed to
+ an address translation erratum which would make it hard to set up
+ correctly. */
+ cir = peek_real_address_q(0x0d000008);
+ if ((cir & 0xffff) == 0x5103) {
+ boot_cpu_data.type = CPU_SH5_103;
+ } else if (((cir >> 32) & 0xffff) == 0x51e2) {
+ /* CPU.VCR aliased at CIR address on SH5-101 */
+ boot_cpu_data.type = CPU_SH5_101;
+ } else {
+ boot_cpu_data.type = CPU_SH_NONE;
+ }
+
+ /*
+	 * First, set up some sane values for the I-cache.
+ */
+ boot_cpu_data.icache.ways = 4;
+ boot_cpu_data.icache.sets = 256;
+ boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
+
+#if 0
+ /*
+ * FIXME: This can probably be cleaned up a bit as well.. for example,
+ * do we really need the way shift _and_ the way_step_shift ?? Judging
+ * by the existing code, I would guess no.. is there any valid reason
+ * why we need to be tracking this around?
+ */
+ boot_cpu_data.icache.way_shift = 13;
+ boot_cpu_data.icache.entry_shift = 5;
+ boot_cpu_data.icache.set_shift = 4;
+ boot_cpu_data.icache.way_step_shift = 16;
+ boot_cpu_data.icache.asid_shift = 2;
+
+ /*
+ * way offset = cache size / associativity, so just don't factor in
+ * associativity in the first place..
+ */
+ boot_cpu_data.icache.way_ofs = boot_cpu_data.icache.sets *
+ boot_cpu_data.icache.linesz;
+
+ boot_cpu_data.icache.asid_mask = 0x3fc;
+ boot_cpu_data.icache.idx_mask = 0x1fe0;
+ boot_cpu_data.icache.epn_mask = 0xffffe000;
+#endif
+
+ boot_cpu_data.icache.flags = 0;
+
+ /* A trivial starting point.. */
+ memcpy(&boot_cpu_data.dcache,
+ &boot_cpu_data.icache, sizeof(struct cache_info));
+
+ return 0;
+}
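+
+#if 0
+/*
+ * The CIR decode above, restated as a free-standing helper (kept out of
+ * the build, like the block above; purely an illustrative sketch).  The
+ * low 16 bits of CIR carry the part number on the SH5-103, while on the
+ * SH5-101 the CPU.VCR is aliased at the CIR address, so the part number
+ * appears in bits 32..47 instead.
+ */
+static int __init cir_to_cpu_type(unsigned long long cir)
+{
+	if ((cir & 0xffff) == 0x5103)
+		return CPU_SH5_103;
+	if (((cir >> 32) & 0xffff) == 0x51e2)
+		return CPU_SH5_101;
+	return CPU_SH_NONE;
+}
+#endif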
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
new file mode 100644
index 00000000000..45c351b0f1b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/switchto.S
@@ -0,0 +1,198 @@
+/*
+ * arch/sh/kernel/cpu/sh5/switchto.S
+ *
+ * sh64 context switch
+ *
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+*/
+
+ .section .text..SHmedia32,"ax"
+ .little
+
+ .balign 32
+
+ .type sh64_switch_to,@function
+ .global sh64_switch_to
+ .global __sh64_switch_to_end
+sh64_switch_to:
+
+/* Incoming args
+ r2 - prev
+ r3 - &prev->thread
+ r4 - next
+ r5 - &next->thread
+
+ Outgoing results
+ r2 - last (=prev) : this just stays in r2 throughout
+
+ Want to create a full (struct pt_regs) on the stack to allow backtracing
+ functions to work. However, we only need to populate the callee-save
+ register slots in this structure; since we're a function our ancestors must
+ have themselves preserved all caller saved state in the stack. This saves
+ some wasted effort since we won't need to look at the values.
+
+ In particular, all caller-save registers are immediately available for
+ scratch use.
+
+*/
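+
+/* In outline, the switch below does
+
+     prev->thread.sp = r15 ;  prev->thread.pc = .Lsave_pc
+     r15 = next->thread.sp ;  branch to next->thread.pc
+
+   so a task suspended here resumes later at .Lsave_pc, on its own stack,
+   where the callee-save state stored below is reloaded.  (Sketch only;
+   the real sequence also records prev->thread.kregs.)
+*/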
+
+#define FRAME_SIZE (76*8 + 8)
+
+ movi FRAME_SIZE, r0
+ sub.l r15, r0, r15
+ ! Do normal-style register save to support backtrace
+
+ st.l r15, 0, r18 ! save link reg
+ st.l r15, 4, r14 ! save fp
+ add.l r15, r63, r14 ! setup frame pointer
+
+ ! hopefully this looks normal to the backtrace now.
+
+ addi.l r15, 8, r1 ! base of pt_regs
+ addi.l r1, 24, r0 ! base of pt_regs.regs
+ addi.l r0, (63*8), r8 ! base of pt_regs.trregs
+
+ /* Note : to be fixed?
+ struct pt_regs is really designed for holding the state on entry
+ to an exception, i.e. pc,sr,regs etc. However, for the context
+ switch state, some of this is not required. But the unwinder takes
+ struct pt_regs * as an arg so we have to build this structure
+ to allow unwinding switched tasks in show_state() */
+
+ st.q r0, ( 9*8), r9
+ st.q r0, (10*8), r10
+ st.q r0, (11*8), r11
+ st.q r0, (12*8), r12
+ st.q r0, (13*8), r13
+ st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
+ ! the point where the process is left in suspended animation, i.e. current
+ ! fp here, not the saved one.
+ st.q r0, (16*8), r16
+
+ st.q r0, (24*8), r24
+ st.q r0, (25*8), r25
+ st.q r0, (26*8), r26
+ st.q r0, (27*8), r27
+ st.q r0, (28*8), r28
+ st.q r0, (29*8), r29
+ st.q r0, (30*8), r30
+ st.q r0, (31*8), r31
+ st.q r0, (32*8), r32
+ st.q r0, (33*8), r33
+ st.q r0, (34*8), r34
+ st.q r0, (35*8), r35
+
+ st.q r0, (44*8), r44
+ st.q r0, (45*8), r45
+ st.q r0, (46*8), r46
+ st.q r0, (47*8), r47
+ st.q r0, (48*8), r48
+ st.q r0, (49*8), r49
+ st.q r0, (50*8), r50
+ st.q r0, (51*8), r51
+ st.q r0, (52*8), r52
+ st.q r0, (53*8), r53
+ st.q r0, (54*8), r54
+ st.q r0, (55*8), r55
+ st.q r0, (56*8), r56
+ st.q r0, (57*8), r57
+ st.q r0, (58*8), r58
+ st.q r0, (59*8), r59
+
+ ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
+	! Use a local label to avoid creating a symbol that will confuse the
+	! backtrace.
+ pta .Lsave_pc, tr0
+
+ gettr tr5, r45
+ gettr tr6, r46
+ gettr tr7, r47
+ st.q r8, (5*8), r45
+ st.q r8, (6*8), r46
+ st.q r8, (7*8), r47
+
+ ! Now switch context
+ gettr tr0, r9
+ st.l r3, 0, r15 ! prev->thread.sp
+ st.l r3, 8, r1 ! prev->thread.kregs
+ st.l r3, 4, r9 ! prev->thread.pc
+ st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
+
+ ! Load PC for next task (init value or save_pc later)
+ ld.l r5, 4, r18 ! next->thread.pc
+ ! Switch stacks
+ ld.l r5, 0, r15 ! next->thread.sp
+ ptabs r18, tr0
+
+ ! Update current
+ ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
+ putcon r9, kcr0 ! current = next->thread_info
+
+ ! go to save_pc for a reschedule, or the initial thread.pc for a new process
+ blink tr0, r63
+
+ ! Restore (when we come back to a previously saved task)
+.Lsave_pc:
+ addi.l r15, 32, r0 ! r0 = next's regs
+ addi.l r0, (63*8), r8 ! r8 = next's tr_regs
+
+ ld.q r8, (5*8), r45
+ ld.q r8, (6*8), r46
+ ld.q r8, (7*8), r47
+ ptabs r45, tr5
+ ptabs r46, tr6
+ ptabs r47, tr7
+
+ ld.q r0, ( 9*8), r9
+ ld.q r0, (10*8), r10
+ ld.q r0, (11*8), r11
+ ld.q r0, (12*8), r12
+ ld.q r0, (13*8), r13
+ ld.q r0, (14*8), r14
+ ld.q r0, (16*8), r16
+
+ ld.q r0, (24*8), r24
+ ld.q r0, (25*8), r25
+ ld.q r0, (26*8), r26
+ ld.q r0, (27*8), r27
+ ld.q r0, (28*8), r28
+ ld.q r0, (29*8), r29
+ ld.q r0, (30*8), r30
+ ld.q r0, (31*8), r31
+ ld.q r0, (32*8), r32
+ ld.q r0, (33*8), r33
+ ld.q r0, (34*8), r34
+ ld.q r0, (35*8), r35
+
+ ld.q r0, (44*8), r44
+ ld.q r0, (45*8), r45
+ ld.q r0, (46*8), r46
+ ld.q r0, (47*8), r47
+ ld.q r0, (48*8), r48
+ ld.q r0, (49*8), r49
+ ld.q r0, (50*8), r50
+ ld.q r0, (51*8), r51
+ ld.q r0, (52*8), r52
+ ld.q r0, (53*8), r53
+ ld.q r0, (54*8), r54
+ ld.q r0, (55*8), r55
+ ld.q r0, (56*8), r56
+ ld.q r0, (57*8), r57
+ ld.q r0, (58*8), r58
+ ld.q r0, (59*8), r59
+
+ ! epilogue
+ ld.l r15, 0, r18
+ ld.l r15, 4, r14
+ ptabs r18, tr0
+ movi FRAME_SIZE, r0
+ add r15, r0, r15
+ blink tr0, r63
+__sh64_switch_to_end:
+.LFE1:
+ .size sh64_switch_to,.LFE1-sh64_switch_to
+
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
new file mode 100644
index 00000000000..119c20afd4e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -0,0 +1,326 @@
+/*
+ * arch/sh/kernel/cpu/sh5/unwind.c
+ *
+ * Copyright (C) 2004 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
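+/* Immediate values seen in movi instructions while scanning a function
+   prologue, indexed by destination register. */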
+static s64 regcache[63];	/* wide enough for a sign-extended movi immediate */
+
+/*
+ * Finding the previous stack frame isn't as straightforward as it is on
+ * some other platforms.  In the sh64 case, we don't have "linked" stack
+ * frames, so we need to do a bit of work to determine the previous frame,
+ * and in turn, the previous r14/r18 pair.
+ *
+ * There are generally a few cases which determine where we can find out
+ * the r14/r18 values. In the general case, this can be determined by poking
+ * around the prologue of the symbol PC is in (note that we absolutely must
+ * have frame pointer support as well as the kernel symbol table mapped,
+ * otherwise we can't even get this far).
+ *
+ * In other cases, such as the interrupt/exception path, we can poke around
+ * the sp/fp.
+ *
+ * Notably, this entire approach is somewhat error prone, and in the event
+ * that the previous frame cannot be determined, that's all we can do.
+ * Either way, this still leaves us with a more correct backtrace than what
+ * we would be able to come up with by walking the stack (which is garbage
+ * for anything beyond the first frame).
+ * -- PFM.
+ */
+static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
+ unsigned long *pprev_fp, unsigned long *pprev_pc,
+ struct pt_regs *regs)
+{
+ const char *sym;
+ char namebuf[128];
+ unsigned long offset;
+ unsigned long prologue = 0;
+ unsigned long fp_displacement = 0;
+ unsigned long fp_prev = 0;
+ unsigned long offset_r14 = 0, offset_r18 = 0;
+ int i, found_prologue_end = 0;
+
+ sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
+ if (!sym)
+ return -EINVAL;
+
+ prologue = pc - offset;
+ if (!prologue)
+ return -EINVAL;
+
+	/* Validate fp, to avoid the risk of dereferencing a bad pointer
+	   later.  Assume 128MB, since that's the amount of RAM on a Cayman.
+	   Modify when there is an SH-5 board with more. */
+ if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
+ (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
+ ((fp & 7) != 0)) {
+ return -EINVAL;
+ }
+
+ /*
+	 * Depth to walk; the limit is completely arbitrary.
+ */
+ for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
+ unsigned long op;
+ u8 major, minor;
+ u8 src, dest, disp;
+
+ op = *(unsigned long *)prologue;
+
+ major = (op >> 26) & 0x3f;
+ src = (op >> 20) & 0x3f;
+ minor = (op >> 16) & 0xf;
+ disp = (op >> 10) & 0x3f;
+ dest = (op >> 4) & 0x3f;
+
+ /*
+ * Stack frame creation happens in a number of ways.. in the
+ * general case when the stack frame is less than 511 bytes,
+ * it's generally created by an addi or addi.l:
+ *
+ * addi/addi.l r15, -FRAME_SIZE, r15
+ *
+ * in the event that the frame size is bigger than this, it's
+ * typically created using a movi/sub pair as follows:
+ *
+ * movi FRAME_SIZE, rX
+ * sub r15, rX, r15
+ */
+
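+		/*
+		 * All the immediate fields below are sign-extended by
+		 * shifting the field up against bit 63 and arithmetic
+		 * shifting back down.  Worked example for a 10-bit field:
+		 * 0x3f8 encodes -8, and ((s64)0x3f8 << 54) >> 54 == -8.
+		 */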
+ switch (major) {
+ case (0x00 >> 2):
+ switch (minor) {
+ case 0x8: /* add.l */
+ case 0x9: /* add */
+ /* Look for r15, r63, r14 */
+ if (src == 15 && disp == 63 && dest == 14)
+ found_prologue_end = 1;
+
+ break;
+ case 0xa: /* sub.l */
+ case 0xb: /* sub */
+ if (src != 15 || dest != 15)
+ continue;
+
+ fp_displacement -= regcache[disp];
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+ break;
+ case (0xa8 >> 2): /* st.l */
+ if (src != 15)
+ continue;
+
+ switch (dest) {
+ case 14:
+ if (offset_r14 || fp_displacement == 0)
+ continue;
+
+ offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r14 *= sizeof(unsigned long);
+ offset_r14 += fp_displacement;
+ break;
+ case 18:
+ if (offset_r18 || fp_displacement == 0)
+ continue;
+
+ offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r18 *= sizeof(unsigned long);
+ offset_r18 += fp_displacement;
+ break;
+ }
+
+ break;
+ case (0xcc >> 2): /* movi */
+ if (dest >= 63) {
+				printk(KERN_NOTICE "%s: Invalid dest reg %d "
+					"specified in movi handler. Failed "
+					"opcode was 0x%lx\n", __func__,
+					dest, op);
+
+ continue;
+ }
+
+			/* Sign extend the 16-bit movi immediate. */
+			regcache[dest] =
+				((((s64)(u64)op >> 10) & 0xffff) << 48) >> 48;
+ break;
+ case (0xd0 >> 2): /* addi */
+ case (0xd4 >> 2): /* addi.l */
+ /* Look for r15, -FRAME_SIZE, r15 */
+ if (src != 15 || dest != 15)
+ continue;
+
+ /* Sign extended frame size.. */
+ fp_displacement +=
+ (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+
+ if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
+ break;
+ }
+
+ if (offset_r14 == 0 || fp_prev == 0) {
+ if (!offset_r14)
+ pr_debug("Unable to find r14 offset\n");
+ if (!fp_prev)
+ pr_debug("Unable to find previous fp\n");
+
+ return -EINVAL;
+ }
+
+	/* For the innermost leaf function, there might not be an offset_r18 */
+ if (!*pprev_pc && (offset_r18 == 0))
+ return -EINVAL;
+
+ *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
+
+ if (offset_r18)
+ *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
+
+ *pprev_pc &= ~1;
+
+ return 0;
+}
+
+/* Don't put this on the stack since we'll want to call sh64_unwind
+ * when we're close to overflowing the stack anyway. */
+static struct pt_regs here_regs;
+
+extern const char syscall_ret;
+extern const char ret_from_syscall;
+extern const char ret_from_exception;
+extern const char ret_from_irq;
+
+static void sh64_unwind_inner(struct pt_regs *regs);
+
+static void unwind_nested(unsigned long pc, unsigned long fp)
+{
+ if ((fp >= __MEMORY_START) &&
+ ((fp & 7) == 0)) {
+ sh64_unwind_inner((struct pt_regs *) fp);
+ }
+}
+
+static void sh64_unwind_inner(struct pt_regs *regs)
+{
+ unsigned long pc, fp;
+ int ofs = 0;
+ int first_pass;
+
+ pc = regs->pc & ~1;
+ fp = regs->regs[14];
+
+ first_pass = 1;
+ for (;;) {
+ int cond;
+ unsigned long next_fp, next_pc;
+
+ if (pc == ((unsigned long) &syscall_ret & ~1)) {
+ printk("SYSCALL\n");
+			unwind_nested(pc, fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
+ printk("SYSCALL (PREEMPTED)\n");
+			unwind_nested(pc, fp);
+ return;
+ }
+
+		/* In this case the PC is discovered by lookup_prev_stack_frame,
+		   but it has 4 subtracted from it to make it look like the
+		   'caller'. */
+ if (pc == ((unsigned long) &ret_from_exception & ~1)) {
+ printk("EXCEPTION\n");
+			unwind_nested(pc, fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_irq & ~1)) {
+ printk("IRQ\n");
+			unwind_nested(pc, fp);
+ return;
+ }
+
+ cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
+ ((pc & 3) == 0) && ((fp & 7) == 0));
+
+ pc -= ofs;
+
+ printk("[<%08lx>] ", pc);
+ print_symbol("%s\n", pc);
+
+ if (first_pass) {
+ /* If the innermost frame is a leaf function, it's
+ * possible that r18 is never saved out to the stack.
+ */
+ next_pc = regs->regs[18];
+ } else {
+ next_pc = 0;
+ }
+
+ if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
+ ofs = sizeof(unsigned long);
+ pc = next_pc & ~1;
+ fp = next_fp;
+ } else {
+ printk("Unable to lookup previous stack frame\n");
+ break;
+ }
+ first_pass = 0;
+ }
+
+ printk("\n");
+}
+
+void sh64_unwind(struct pt_regs *regs)
+{
+ if (!regs) {
+ /*
+ * Fetch current regs if we have no other saved state to back
+ * trace from.
+ */
+ regs = &here_regs;
+
+ __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
+ __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
+ __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
+
+ __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
+ __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
+ __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
+ __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
+ __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
+ __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
+ __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
+ __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
+
+ __asm__ __volatile__ (
+ "pta 0f, tr0\n\t"
+ "blink tr0, %0\n\t"
+ "0: nop"
+ : "=r" (regs->pc)
+ );
+ }
+
+ printk("\nCall Trace:\n");
+ sh64_unwind_inner(regs);
+}
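+
+/*
+ * Typical use (illustrative): sh64_unwind(NULL) snapshots the live
+ * r14/r15/r18 and target registers and backtraces the current context,
+ * while sh64_unwind(regs) backtraces from already-saved trap state.
+ */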
+