Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--  arch/microblaze/kernel/Makefile               |    1
-rw-r--r--  arch/microblaze/kernel/asm-offsets.c          |   21
-rw-r--r--  arch/microblaze/kernel/early_printk.c         |    3
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S          |    2
-rw-r--r--  arch/microblaze/kernel/entry.S                | 1116
-rw-r--r--  arch/microblaze/kernel/exceptions.c           |   45
-rw-r--r--  arch/microblaze/kernel/head.S                 |  190
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S |  746
-rw-r--r--  arch/microblaze/kernel/intc.c                 |    4
-rw-r--r--  arch/microblaze/kernel/microblaze_ksyms.c     |    2
-rw-r--r--  arch/microblaze/kernel/misc.S                 |  120
-rw-r--r--  arch/microblaze/kernel/process.c              |   59
-rw-r--r--  arch/microblaze/kernel/prom.c                 |    7
-rw-r--r--  arch/microblaze/kernel/setup.c                |   62
-rw-r--r--  arch/microblaze/kernel/signal.c               |  109
-rw-r--r--  arch/microblaze/kernel/syscall_table.S        |    6
-rw-r--r--  arch/microblaze/kernel/traps.c                |   42
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S          |    5
18 files changed, 2387 insertions, 153 deletions
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index da94bec4ecb..f4a5e19a20e 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SELFMOD) += selfmod.o
obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
+obj-$(CONFIG_MMU) += misc.o
obj-y += entry$(MMUEXT).o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index aabd9e9423a..7bc7b68f97d 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
@@ -68,16 +69,26 @@ int main(int argc, char *argv[])
/* struct task_struct */
DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
+#ifdef CONFIG_MMU
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ BLANK();
+
+ DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
+ BLANK();
+#endif
/* struct thread_info */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_STATUS, offsetof(struct thread_info, status));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
BLANK();
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 4b0f0fdb9ca..7de84923ba0 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -87,6 +87,9 @@ int __init setup_early_printk(char *opt)
base_addr = early_uartlite_console();
if (base_addr) {
early_console_initialized = 1;
+#ifdef CONFIG_MMU
+ early_console_reg_tlb_alloc(base_addr);
+#endif
early_printk("early_printk_console is enabled at 0x%08x\n",
base_addr);
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index f24b1268baa..1fce6b803f5 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -10,7 +10,7 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
new file mode 100644
index 00000000000..91a0e7b185d
--- /dev/null
+++ b/arch/microblaze/kernel/entry.S
@@ -0,0 +1,1116 @@
+/*
+ * Low-level system-call handling, trap handlers and context-switching
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
+ * Copyright (C) 2001,2002 NEC Corporation
+ * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * Written by Miles Bader <miles@gnu.org>
+ * Heavily modified by John Williams for Microblaze
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+#include <asm/entry.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/exceptions.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#include <asm/page.h>
+#include <asm/unistd.h>
+
+#include <linux/errno.h>
+#include <asm/signal.h>
+
+/* The size of a state save frame. */
+#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
+
+/* The offset of the struct pt_regs in a `state save frame' on the stack. */
+#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
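+/* For orientation, a sketch of the stack layout implied by the two
+ * defines above (assuming STATE_SAVE_ARG_SPACE is the 24-byte arg area):
+ *
+ *	r1 + 0               ... argument space for called C functions
+ *	r1 + PTO             ... struct pt_regs (PT_SIZE bytes)
+ *	r1 + STATE_SAVE_SIZE ... previous stack frame
+ */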
+
+#define C_ENTRY(name) .globl name; .align 4; name
+
+/*
+ * Various ways of setting and clearing BIP in flags reg.
+ * This is mucky, but necessary on MicroBlaze versions that
+ * allow msr ops to write to BIP
+ */
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ .macro clear_bip
+ msrclr r11, MSR_BIP
+ nop
+ .endm
+
+ .macro set_bip
+ msrset r11, MSR_BIP
+ nop
+ .endm
+
+ .macro clear_eip
+ msrclr r11, MSR_EIP
+ nop
+ .endm
+
+ .macro set_ee
+ msrset r11, MSR_EE
+ nop
+ .endm
+
+ .macro disable_irq
+ msrclr r11, MSR_IE
+ nop
+ .endm
+
+ .macro enable_irq
+ msrset r11, MSR_IE
+ nop
+ .endm
+
+ .macro set_ums
+ msrset r11, MSR_UMS
+ nop
+ msrclr r11, MSR_VMS
+ nop
+ .endm
+
+ .macro set_vms
+ msrclr r11, MSR_UMS
+ nop
+ msrset r11, MSR_VMS
+ nop
+ .endm
+
+ .macro clear_vms_ums
+ msrclr r11, MSR_VMS
+ nop
+ msrclr r11, MSR_UMS
+ nop
+ .endm
+#else
+ .macro clear_bip
+ mfs r11, rmsr
+ nop
+ andi r11, r11, ~MSR_BIP
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro set_bip
+ mfs r11, rmsr
+ nop
+ ori r11, r11, MSR_BIP
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro clear_eip
+ mfs r11, rmsr
+ nop
+ andi r11, r11, ~MSR_EIP
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro set_ee
+ mfs r11, rmsr
+ nop
+ ori r11, r11, MSR_EE
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro disable_irq
+ mfs r11, rmsr
+ nop
+ andi r11, r11, ~MSR_IE
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro enable_irq
+ mfs r11, rmsr
+ nop
+ ori r11, r11, MSR_IE
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro set_ums
+ mfs r11, rmsr
+ nop
+ ori r11, r11, MSR_VMS
+ andni r11, r11, MSR_UMS
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro set_vms
+ mfs r11, rmsr
+ nop
+ ori r11, r11, MSR_VMS
+ andni r11, r11, MSR_UMS
+ mts rmsr, r11
+ nop
+ .endm
+
+ .macro clear_vms_ums
+ mfs r11, rmsr
+ nop
+ andni r11, r11, (MSR_VMS|MSR_UMS)
+ mts rmsr,r11
+ nop
+ .endm
+#endif
+
+/* Define how to call high-level functions. With MMU, virtual mode must be
+ * enabled when calling the high-level function. Clobbers R11.
+ * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
+ */
+
+/* turn on virtual protected mode save */
+#define VM_ON \
+ set_ums; \
+ rted r0, 2f; \
+2: nop;
+
+/* turn off virtual protected mode save and user mode save */
+#define VM_OFF \
+ clear_vms_ums; \
+ rted r0, TOPHYS(1f); \
+1: nop;
+
+#define SAVE_REGS \
+ swi r2, r1, PTO+PT_R2; /* Save SDA */ \
+ swi r5, r1, PTO+PT_R5; \
+ swi r6, r1, PTO+PT_R6; \
+ swi r7, r1, PTO+PT_R7; \
+ swi r8, r1, PTO+PT_R8; \
+ swi r9, r1, PTO+PT_R9; \
+ swi r10, r1, PTO+PT_R10; \
+ swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
+ swi r12, r1, PTO+PT_R12; \
+ swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
+ swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
+ swi r15, r1, PTO+PT_R15; /* Save LP */ \
+ swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
+ swi r19, r1, PTO+PT_R19; \
+ swi r20, r1, PTO+PT_R20; \
+ swi r21, r1, PTO+PT_R21; \
+ swi r22, r1, PTO+PT_R22; \
+ swi r23, r1, PTO+PT_R23; \
+ swi r24, r1, PTO+PT_R24; \
+ swi r25, r1, PTO+PT_R25; \
+ swi r26, r1, PTO+PT_R26; \
+ swi r27, r1, PTO+PT_R27; \
+ swi r28, r1, PTO+PT_R28; \
+ swi r29, r1, PTO+PT_R29; \
+ swi r30, r1, PTO+PT_R30; \
+ swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
+ mfs r11, rmsr; /* save MSR */ \
+ nop; \
+ swi r11, r1, PTO+PT_MSR;
+
+#define RESTORE_REGS \
+ lwi r11, r1, PTO+PT_MSR; \
+ mts rmsr , r11; \
+ nop; \
+ lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
+ lwi r5, r1, PTO+PT_R5; \
+ lwi r6, r1, PTO+PT_R6; \
+ lwi r7, r1, PTO+PT_R7; \
+ lwi r8, r1, PTO+PT_R8; \
+ lwi r9, r1, PTO+PT_R9; \
+ lwi r10, r1, PTO+PT_R10; \
+ lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
+ lwi r12, r1, PTO+PT_R12; \
+ lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
+ lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
+ lwi r15, r1, PTO+PT_R15; /* restore LP */ \
+ lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
+ lwi r19, r1, PTO+PT_R19; \
+ lwi r20, r1, PTO+PT_R20; \
+ lwi r21, r1, PTO+PT_R21; \
+ lwi r22, r1, PTO+PT_R22; \
+ lwi r23, r1, PTO+PT_R23; \
+ lwi r24, r1, PTO+PT_R24; \
+ lwi r25, r1, PTO+PT_R25; \
+ lwi r26, r1, PTO+PT_R26; \
+ lwi r27, r1, PTO+PT_R27; \
+ lwi r28, r1, PTO+PT_R28; \
+ lwi r29, r1, PTO+PT_R29; \
+ lwi r30, r1, PTO+PT_R30; \
+ lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
+
+.text
+
+/*
+ * User trap.
+ *
+ * System calls are handled here.
+ *
+ * Syscall protocol:
+ * Syscall number in r12, args in r5-r10
+ * Return value in r3
+ *
+ * Trap entered via brki instruction, so BIP bit is set, and interrupts
+ * are masked. This is nice, means we don't have to CLI before state save
+ */
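+/*
+ * Illustrative sketch only (not part of this patch): a userland
+ * syscall under this ABI, assuming __NR_write from asm/unistd.h.
+ * The vector offset 0x8 matches the _user_exception entry installed
+ * in the .init.ivt table at the end of this file:
+ *
+ *	addik	r12, r0, __NR_write	; syscall number in r12
+ *	addik	r5, r0, 1		; arg 1 in r5, args 2-6 in r6-r10
+ *	brki	r14, 0x8		; trap - r14 gets the return address
+ *	; result (or -errno) comes back in r3
+ */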
+C_ENTRY(_user_exception):
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ addi r14, r14, 4 /* return address is 4 byte after call */
+ swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
+
+ lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
+ beqi r11, 1f; /* Jump ahead if coming from user */
+/* Kernel-mode state save. */
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+ tophys(r1,r11);
+ swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
+ lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+ SAVE_REGS
+
+ addi r11, r0, 1; /* Was in kernel-mode. */
+ swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
+ brid 2f;
+ nop; /* Fill delay slot */
+
+/* User-mode state save. */
+1:
+ lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+ tophys(r1,r1);
+ lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
+/* calculate kernel stack pointer from task struct (THREAD_SIZE, 8k) */
+ addik r1, r1, THREAD_SIZE;
+ tophys(r1,r1);
+
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+ SAVE_REGS
+
+ swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ swi r11, r1, PTO+PT_R1; /* Store user SP. */
+ addi r11, r0, 1;
+ swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
+2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+ /* Save away the syscall number. */
+ swi r12, r1, PTO+PT_R0;
+ tovirt(r1,r1)
+
+ la r15, r0, ret_from_trap-8
+/* where the trap should return; need -8 to adjust for rtsd r15, 8 */
+/* Jump to the appropriate function for the system call number in r12
+ * (r12 is not preserved), or return an error if r12 is not valid. The LP
+ * register should point to the location where
+ * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
+ /* See if the system call number is valid. */
+ addi r11, r12, -__NR_syscalls;
+ bgei r11,1f;
+ /* Figure out which function to use for this system call. */
+ /* Note Microblaze barrel shift is optional, so don't rely on it */
+ add r12, r12, r12; /* convert num -> ptr */
+ add r12, r12, r12;
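+ /* e.g. syscall #4 becomes byte offset 16 into sys_call_table; the
+ * two adds compute num * 4 without relying on the optional barrel
+ * shifter */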
+
+ /* Track syscalls and store their counts in r0_ram */
+ lwi r3, r12, 0x400 + TOPHYS(r0_ram)
+ addi r3, r3, 1
+ swi r3, r12, 0x400 + TOPHYS(r0_ram)
+
+ lwi r12, r12, TOPHYS(sys_call_table); /* Function ptr */
+ /* Make the system call (via the function pointer in r12) */
+ set_vms;
+ rtid r12, 0;
+ nop;
+ /* The syscall number is invalid, return an error. */
+1: VM_ON; /* RETURN() expects virtual mode*/
+ addi r3, r0, -ENOSYS;
+ rtsd r15,8; /* looks like a normal subroutine return */
+ or r0, r0, r0
+
+
+/* Entry point used to return from a syscall/trap. */
+/* We re-enable BIP bit before state restore */
+C_ENTRY(ret_from_trap):
+ set_bip; /* Ints masked for state restore*/
+ lwi r11, r1, PTO+PT_MODE;
+/* See if returning to kernel mode, if so, skip resched &c. */
+ bnei r11, 2f;
+
+ /* We're returning to user mode, so check for various conditions that
+ * trigger rescheduling. */
+ /* Get current task ptr into r11 */
+ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_NEED_RESCHED;
+ beqi r11, 5f;
+
+ swi r3, r1, PTO + PT_R3; /* store syscall result */
+ swi r4, r1, PTO + PT_R4;
+ bralid r15, schedule; /* Call scheduler */
+ nop; /* delay slot */
+ lwi r3, r1, PTO + PT_R3; /* restore syscall result */
+ lwi r4, r1, PTO + PT_R4;
+
+ /* Maybe handle a signal */
+5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_SIGPENDING;
+ beqi r11, 1f; /* Signals to handle, handle them */
+
+ swi r3, r1, PTO + PT_R3; /* store syscall result */
+ swi r4, r1, PTO + PT_R4;
+ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
+ addi r7, r0, 1; /* Arg 3: int in_syscall */
+ bralid r15, do_signal; /* Handle any signals */
+ nop;
+ lwi r3, r1, PTO + PT_R3; /* restore syscall result */
+ lwi r4, r1, PTO + PT_R4;
+
+/* Finally, return to user state. */
+1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
+ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ VM_OFF;
+ tophys(r1,r1);
+ RESTORE_REGS;
+ addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
+ lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
+ bri 6f;
+
+/* Return to kernel state. */
+2: VM_OFF;
+ tophys(r1,r1);
+ RESTORE_REGS;
+ addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
+ tovirt(r1,r1);
+6:
+TRAP_return: /* Make global symbol for debugging */
+ rtbd r14, 0; /* Instructions to return from an IRQ */
+ nop;
+
+
+/* These syscalls need access to the struct pt_regs on the stack, so we
+ implement them in assembly (they're basically all wrappers anyway). */
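+/* For reference, a sketch of the C signature the fork/clone wrappers
+ * feed, as assumed from the argument setup below (verify against
+ * kernel/fork.c of this tree):
+ *	long do_fork(unsigned long clone_flags, unsigned long stack_start,
+ *		     struct pt_regs *regs, unsigned long stack_size,
+ *		     int __user *parent_tidptr, int __user *child_tidptr);
+ */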
+
+C_ENTRY(sys_fork_wrapper):
+ addi r5, r0, SIGCHLD /* Arg 0: flags */
+ lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
+ la r7, r1, PTO /* Arg 2: parent context */
+ add r8, r0, r0 /* Arg 3: (unused) */
+ add r9, r0, r0; /* Arg 4: (unused) */
+ add r10, r0, r0; /* Arg 5: (unused) */
+ brid do_fork /* Do real work (tail-call) */
+ nop;
+
+/* This is the initial entry point for a new child thread, with an appropriate
+   stack in place that makes it look like the child is in the middle of a
+   syscall.  This function is actually `returned to' from switch_thread
+   (copy_thread makes ret_from_fork the return address in each new thread's
+   saved context). */
+C_ENTRY(ret_from_fork):
+ bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
+ add r3, r5, r0; /* switch_thread returns the prev task */
+ /* ( in the delay slot ) */
+ add r3, r0, r0; /* Child's fork call should return 0. */
+ brid ret_from_trap; /* Do normal trap return */
+ nop;
+
+C_ENTRY(sys_vfork_wrapper):
+ la r5, r1, PTO
+ brid sys_vfork /* Do real work (tail-call) */
+ nop
+
+C_ENTRY(sys_clone_wrapper):
+ bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
+ lwi r6, r1, PTO+PT_R1; /* If so, use parent's stack ptr */
+1: la r7, r1, PTO; /* Arg 2: parent context */
+ add r8, r0, r0; /* Arg 3: (unused) */
+ add r9, r0, r0; /* Arg 4: (unused) */
+ add r10, r0, r0; /* Arg 5: (unused) */
+ brid do_fork /* Do real work (tail-call) */
+ nop;
+
+C_ENTRY(sys_execve_wrapper):
+ la r8, r1, PTO; /* add user context as 4th arg */
+ brid sys_execve; /* Do real work (tail-call).*/
+ nop;
+
+C_ENTRY(sys_sigsuspend_wrapper):
+ swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
+ swi r4, r1, PTO+PT_R4;
+ la r6, r1, PTO; /* add user context as 2nd arg */
+ bralid r15, sys_sigsuspend; /* Do real work.*/
+ nop;
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ bri ret_from_trap /* fall through will not work here due to align */
+ nop;
+
+C_ENTRY(sys_rt_sigsuspend_wrapper):
+ swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
+ swi r4, r1, PTO+PT_R4;
+ la r7, r1, PTO; /* add user context as 3rd arg */
+ brlid r15, sys_rt_sigsuspend; /* Do real work.*/
+ nop;
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ bri ret_from_trap /* fall through will not work here due to align */
+ nop;
+
+
+C_ENTRY(sys_sigreturn_wrapper):
+ swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
+ swi r4, r1, PTO+PT_R4;
+ la r5, r1, PTO; /* add user context as 1st arg */
+ brlid r15, sys_sigreturn; /* Do real work.*/
+ nop;
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ bri ret_from_trap /* fall through will not work here due to align */
+ nop;
+
+C_ENTRY(sys_rt_sigreturn_wrapper):
+ swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
+ swi r4, r1, PTO+PT_R4;
+ la r5, r1, PTO; /* add user context as 1st arg */
+ brlid r15, sys_rt_sigreturn /* Do real work */
+ nop;
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ bri ret_from_trap /* fall through will not work here due to align */
+ nop;
+
+/*
+ * HW EXCEPTION routine start
+ */
+
+#define SAVE_STATE \
+ swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
+ set_bip; /* equalize initial state for all possible entries */\
+ clear_eip; \
+ enable_irq; \
+ set_ee; \
+ /* See if already in kernel mode.*/ \
+ lwi r11, r0, TOPHYS(PER_CPU(KM)); \
+ beqi r11, 1f; /* Jump ahead if coming from user */\
+ /* Kernel-mode state save. */ \
+ /* Reload kernel stack-ptr. */ \
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
+ tophys(r1,r11); \
+ swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
+ lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
+ /* store return registers separately because \
+ * this macro is used by other exceptions */ \
+ swi r3, r1, PTO + PT_R3; \
+ swi r4, r1, PTO + PT_R4; \
+ SAVE_REGS \
+ /* PC, before IRQ/trap - this is one instruction above */ \
+ swi r17, r1, PTO+PT_PC; \
+ \
+ addi r11, r0, 1; /* Was in kernel-mode. */ \
+ swi r11, r1, PTO+PT_MODE; \
+ brid 2f; \
+ nop; /* Fill delay slot */ \
+1: /* User-mode state save. */ \
+ lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
+ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+ tophys(r1,r1); \
+ lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
+ addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
+ tophys(r1,r1); \
+ \
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
+ /* store return registers separately because this macro \
+ * is used by other exceptions */ \
+ swi r3, r1, PTO + PT_R3; \
+ swi r4, r1, PTO + PT_R4; \
+ SAVE_REGS \
+ /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
+ swi r17, r1, PTO+PT_PC; \
+ \
+ swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
+ swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
+ addi r11, r0, 1; \
+ swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
+2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+ /* Save away the syscall number. */ \
+ swi r0, r1, PTO+PT_R0; \
+ tovirt(r1,r1)
+
+C_ENTRY(full_exception_trap):
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ /* adjust the exception address for a privileged instruction
+ * so we can find where it occurred */
+ addik r17, r17, -4
+ SAVE_STATE /* Save registers */
+ /* FIXME this could be stored directly in the PT_ESR reg.
+ * I tested it but there is a fault */
+ /* where the trap should return need -8 to adjust for rtsd r15, 8 */
+ la r15, r0, ret_from_exc - 8
+ la r5, r1, PTO /* parameter struct pt_regs * regs */
+ mfs r6, resr
+ nop
+ mfs r7, rfsr; /* save FSR */
+ nop
+ la r12, r0, full_exception
+ set_vms;
+ rtbd r12, 0;
+ nop;
+
+/*
+ * Unaligned data trap.
+ *
+ * An unaligned data access that lands last on a 4k page (and may thus
+ * spill into the next, possibly unmapped, page) is handled here.
+ *
+ * Trap entered via exception, so EE bit is set, and interrupts
+ * are masked. This is nice, means we don't have to CLI before state save
+ *
+ * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
+ */
+C_ENTRY(unaligned_data_trap):
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ SAVE_STATE /* Save registers.*/
+ /* where the trap should return need -8 to adjust for rtsd r15, 8 */
+ la r15, r0, ret_from_exc-8
+ mfs r3, resr /* ESR */
+ nop
+ mfs r4, rear /* EAR */
+ nop
+ la r7, r1, PTO /* parameter struct pt_regs * regs */
+ la r12, r0, _unaligned_data_exception
+ set_vms;
+ rtbd r12, 0; /* interrupts enabled */
+ nop;
+
+/*
+ * Page fault traps.
+ *
+ * If the real exception handler (from hw_exception_handler.S) didn't find
+ * the mapping for the process, we end up here to handle the situation.
+ *
+ * Trap entered via exceptions, so EE bit is set, and interrupts
+ * are masked. This is nice, means we don't have to CLI before state save
+ *
+ * Build a standard exception frame for TLB Access errors. All TLB exceptions
+ * will bail out to this point if they can't resolve the lightweight TLB fault.
+ *
+ * The C function called is in "arch/microblaze/mm/fault.c", declared as:
+ * void do_page_fault(struct pt_regs *regs,
+ * unsigned long address,
+ * unsigned long error_code)
+ */
+/* data and instruction trap - which one it was is resolved in fault.c */
+C_ENTRY(page_fault_data_trap):
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ SAVE_STATE /* Save registers.*/
+ /* where the trap should return need -8 to adjust for rtsd r15, 8 */
+ la r15, r0, ret_from_exc-8
+ la r5, r1, PTO /* parameter struct pt_regs * regs */
+ mfs r6, rear /* parameter unsigned long address */
+ nop
+ mfs r7, resr /* parameter unsigned long error_code */
+ nop
+ la r12, r0, do_page_fault
+ set_vms;
+ rtbd r12, 0; /* interrupts enabled */
+ nop;
+
+C_ENTRY(page_fault_instr_trap):
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ SAVE_STATE /* Save registers.*/
+ /* where the trap should return need -8 to adjust for rtsd r15, 8 */
+ la r15, r0, ret_from_exc-8
+ la r5, r1, PTO /* parameter struct pt_regs * regs */
+ mfs r6, rear /* parameter unsigned long address */
+ nop
+ ori r7, r0, 0 /* parameter unsigned long error_code */
+ la r12, r0, do_page_fault
+ set_vms;
+ rtbd r12, 0; /* interrupts enabled */
+ nop;
+
+/* Entry point used to return from an exception. */
+C_ENTRY(ret_from_exc):
+ set_bip; /* Ints masked for state restore*/
+ lwi r11, r1, PTO+PT_MODE;
+ bnei r11, 2f; /* See if returning to kernel mode, */
+ /* ... if so, skip resched &c. */
+
+ /* We're returning to user mode, so check for various conditions that
+ trigger rescheduling. */
+ /* Get current task ptr into r11 */
+ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_NEED_RESCHED;
+ beqi r11, 5f;
+
+/* Call the scheduler before returning from a syscall/trap. */
+ bralid r15, schedule; /* Call scheduler */
+ nop; /* delay slot */
+
+ /* Maybe handle a signal */
+5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_SIGPENDING;
+ beqi r11, 1f; /* Signals to handle, handle them */
+
+ /*
+ * Handle a signal return; Pending signals should be in r18.
+ *
+ * Not all registers are saved by the normal trap/interrupt entry
+ * points (for instance, call-saved registers (because the normal
+ * C-compiler calling sequence in the kernel makes sure they're
+ * preserved), and call-clobbered registers in the case of
+ * traps), but signal handlers may want to examine or change the
+ * complete register state. Here we save anything not saved by
+ * the normal entry sequence, so that it may be safely restored
+ * (in a possibly modified form) after do_signal returns.
+ * store return registers separately because this macro is used
+ * by other exceptions */
+ swi r3, r1, PTO + PT_R3;
+ swi r4, r1, PTO + PT_R4;
+ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
+ addi r7, r0, 0; /* Arg 3: int in_syscall */
+ bralid r15, do_signal; /* Handle any signals */
+ nop;
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+
+/* Finally, return to user state. */
+1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
+ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ VM_OFF;
+ tophys(r1,r1);
+
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ RESTORE_REGS;
+ addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
+
+ lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
+ bri 6f;
+/* Return to kernel state. */
+2: VM_OFF;
+ tophys(r1,r1);
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ RESTORE_REGS;
+ addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
+
+ tovirt(r1,r1);
+6:
+EXC_return: /* Make global symbol for debugging */
+ rtbd r14, 0; /* Instructions to return from an IRQ */
+ nop;
+
+/*
+ * HW EXCEPTION routine end
+ */
+
+/*
+ * Hardware maskable interrupts.
+ *
+ * The stack-pointer (r1) should have already been saved to the memory
+ * location PER_CPU(ENTRY_SP).
+ */
+C_ENTRY(_interrupt):
+/* MS: we are in physical address */
+/* Save registers, switch to proper stack, convert SP to virtual.*/
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+ swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+ /* MS: See if already in kernel mode. */
+ lwi r11, r0, TOPHYS(PER_CPU(KM));
+ beqi r11, 1f; /* MS: Jump ahead if coming from user */
+
+/* Kernel-mode state save. */
+ or r11, r1, r0
+ tophys(r1,r11); /* MS: r1 now holds the physical address of the stack */
+/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
+ swi r11, r1, (PT_R1 - PT_SIZE);
+/* MS: restore r11 because of saving in SAVE_REGS */
+ lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+ /* save registers */
+/* MS: Make room on the stack -> activation record */
+ addik r1, r1, -STATE_SAVE_SIZE;
+/* MS: store return registers separately because
+ * this macro is used by other exceptions */
+ swi r3, r1, PTO + PT_R3;
+ swi r4, r1, PTO + PT_R4;
+ SAVE_REGS
+ /* MS: store mode */
+ addi r11, r0, 1; /* MS: Was in kernel-mode. */
+ swi r11, r1, PTO + PT_MODE; /* MS: and save it */
+ brid 2f;
+ nop; /* MS: Fill delay slot */
+
+1:
+/* User-mode state save. */
+/* MS: restore r11 -> FIXME move before SAVE_REG */
+ lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+ /* MS: get the saved current */
+ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ tophys(r1,r1);
+ lwi r1, r1, TS_THREAD_INFO;
+ addik r1, r1, THREAD_SIZE;
+ tophys(r1,r1);
+ /* save registers */
+ addik r1, r1, -STATE_SAVE_SIZE;
+ swi r3, r1, PTO+PT_R3;
+ swi r4, r1, PTO+PT_R4;
+ SAVE_REGS
+ /* calculate mode */
+ swi r0, r1, PTO + PT_MODE;
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ swi r11, r1, PTO+PT_R1;
+ /* setup kernel mode to KM */
+ addi r11, r0, 1;
+ swi r11, r0, TOPHYS(PER_CPU(KM));
+
+2:
+ lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ swi r0, r1, PTO + PT_R0;
+ tovirt(r1,r1)
+ la r5, r1, PTO;
+ set_vms;
+ la r11, r0, do_IRQ;
+ la r15, r0, irq_call;
+irq_call:rtbd r11, 0;
+ nop;
+
+/* MS: we are in virtual mode */
+ret_from_irq:
+ lwi r11, r1, PTO + PT_MODE;
+ bnei r11, 2f;
+
+ add r11, r0, CURRENT_TASK;
+ lwi r11, r11, TS_THREAD_INFO;
+ lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
+ andi r11, r11, _TIF_NEED_RESCHED;
+ beqi r11, 5f
+ bralid r15, schedule;
+ nop; /* delay slot */
+
+ /* Maybe handle a signal */
+5: add r11, r0, CURRENT_TASK;
+ lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_SIGPENDING;
+ beqid r11, no_intr_resched
+/* Handle a signal return; Pending signals should be in r18. */
+ addi r7, r0, 0; /* Arg 3: int in_syscall */
+ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
+ bralid r15, do_signal; /* Handle any signals */
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
+
+/* Finally, return to user state. */
+no_intr_resched:
+ /* Disable interrupts, we are now committed to the state restore */
+ disable_irq
+ swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
+ add r11, r0, CURRENT_TASK;
+ swi r11, r0, PER_CPU(CURRENT_SAVE);
+ VM_OFF;
+ tophys(r1,r1);
+ lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
+ lwi r4, r1, PTO + PT_R4;
+ RESTORE_REGS
+ addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
+ lwi r1, r1, PT_R1 - PT_SIZE;
+ bri 6f;
+/* MS: Return to kernel state. */
+2: VM_OFF /* MS: turn off MMU */
+ tophys(r1,r1)
+ lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
+ lwi r4, r1, PTO + PT_R4;
+ RESTORE_REGS
+ addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
+ tovirt(r1,r1);
+6:
+IRQ_return: /* MS: Make global symbol for debugging */
+ rtid r14, 0
+ nop
+
+/*
+ * `Debug' trap
+ * We enter dbtrap in "BIP" (breakpoint) mode.
+ * So we exit the breakpoint mode with an 'rtbd' and proceed with the
+ * original dbtrap.
+ * however, wait to save state first
+ */
+C_ENTRY(_debug_exception):
+ /* BIP bit is set on entry, no interrupts can occur */
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+
+ swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
+ set_bip; /* equalize initial state for all possible entries */
+ clear_eip;
+ enable_irq;
+ lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
+ beqi r11, 1f; /* Jump ahead if coming from user */
+ /* Kernel-mode state save. */
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+ tophys(r1,r11);
+ swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
+ lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+ swi r3, r1, PTO + PT_R3;
+ swi r4, r1, PTO + PT_R4;
+ SAVE_REGS;
+
+ addi r11, r0, 1; /* Was in kernel-mode. */
+ swi r11, r1, PTO + PT_MODE;
+ brid 2f;
+ nop; /* Fill delay slot */
+1: /* User-mode state save. */
+ lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+ tophys(r1,r1);
+ lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
+ addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
+ tophys(r1,r1);
+
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+ swi r3, r1, PTO + PT_R3;
+ swi r4, r1, PTO + PT_R4;
+ SAVE_REGS;
+
+ swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ swi r11, r1, PTO+PT_R1; /* Store user SP. */
+ addi r11, r0, 1;
+ swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
+2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+ /* Save away the syscall number. */
+ swi r0, r1, PTO+PT_R0;
+ tovirt(r1,r1)
+
+ addi r5, r0, SIGTRAP /* send the trap signal */
+ add r6, r0, CURRENT_TASK; /* Arg 2: current task ptr (into r6) */
+ addk r7, r0, r0 /* 3rd param zero */
+
+ set_vms;
+ la r11, r0, send_sig;
+ la r15, r0, dbtrap_call;
+dbtrap_call: rtbd r11, 0;
+ nop;
+
+ set_bip; /* Ints masked for state restore*/
+ lwi r11, r1, PTO+PT_MODE;
+ bnei r11, 2f;
+
+ /* Get current task ptr into r11 */
+ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_NEED_RESCHED;
+ beqi r11, 5f;
+
+/* Call the scheduler before returning from a syscall/trap. */
+
+ bralid r15, schedule; /* Call scheduler */
+ nop; /* delay slot */
+ /* XXX Is PT_DTRACE handling needed here? */
+ /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
+
+ /* Maybe handle a signal */
+5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_SIGPENDING;
+ beqi r11, 1f; /* Signals to handle, handle them */
+
+/* Handle a signal return; Pending signals should be in r18. */
+ /* Not all registers are saved by the normal trap/interrupt entry
+ points (for instance, call-saved registers (because the normal
+ C-compiler calling sequence in the kernel makes sure they're
+ preserved), and call-clobbered registers in the case of
+ traps), but signal handlers may want to examine or change the
+ complete register state. Here we save anything not saved by
+ the normal entry sequence, so that it may be safely restored
+ (in a possibly modified form) after do_signal returns. */
+
+ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
+ addi r7, r0, 0; /* Arg 3: int in_syscall */
+ bralid r15, do_signal; /* Handle any signals */
+ nop;
+
+
+/* Finally, return to user state. */
+1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
+ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ VM_OFF;
+ tophys(r1,r1);
+
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ RESTORE_REGS
+ addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
+
+
+ lwi r1, r1, PT_R1 - PT_SIZE;
+ /* Restore user stack pointer. */
+ bri 6f;
+
+/* Return to kernel state. */
+2: VM_OFF;
+ tophys(r1,r1);
+ lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+ lwi r4, r1, PTO+PT_R4;
+ RESTORE_REGS
+ addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
+
+ tovirt(r1,r1);
+6:
+DBTRAP_return: /* Make global symbol for debugging */
+ rtbd r14, 0; /* Instructions to return from an IRQ */
+ nop;
+
+
+
+ENTRY(_switch_to)
+ /* prepare return value */
+ addk r3, r0, r31
+
+ /* save registers in cpu_context */
+ /* use r11 and r12, volatile registers, as temp register */
+ /* give start of cpu_context for previous process */
+ addik r11, r5, TI_CPU_CONTEXT
+ swi r1, r11, CC_R1
+ swi r2, r11, CC_R2
+ /* skip volatile registers (r3-r12, caller-saved under the MicroBlaze ABI);
+ * they are saved on the stack when we jumped to _switch_to() */
+ /* dedicated registers */
+ swi r13, r11, CC_R13
+ swi r14, r11, CC_R14
+ swi r15, r11, CC_R15
+ swi r16, r11, CC_R16
+ swi r17, r11, CC_R17
+ swi r18, r11, CC_R18
+ /* save non-volatile registers */
+ swi r19, r11, CC_R19
+ swi r20, r11, CC_R20
+ swi r21, r11, CC_R21
+ swi r22, r11, CC_R22
+ swi r23, r11, CC_R23
+ swi r24, r11, CC_R24
+ swi r25, r11, CC_R25
+ swi r26, r11, CC_R26
+ swi r27, r11, CC_R27
+ swi r28, r11, CC_R28
+ swi r29, r11, CC_R29
+ swi r30, r11, CC_R30
+ /* special purpose registers */
+ mfs r12, rmsr
+ nop
+ swi r12, r11, CC_MSR
+ mfs r12, rear
+ nop
+ swi r12, r11, CC_EAR
+ mfs r12, resr
+ nop
+ swi r12, r11, CC_ESR
+ mfs r12, rfsr
+ nop
+ swi r12, r11, CC_FSR
+
+ /* update r31, the current */
+ lwi r31, r6, TI_TASK /* get pointer to the task which will run next */
+ /* store it to CURRENT_SAVE too */
+ swi r31, r0, PER_CPU(CURRENT_SAVE)
+
+ /* get new process' cpu context and restore */
+ /* get the start of the next task's cpu context */
+ addik r11, r6, TI_CPU_CONTEXT
+
+ /* non-volatile registers */
+ lwi r30, r11, CC_R30
+ lwi r29, r11, CC_R29
+ lwi r28, r11, CC_R28
+ lwi r27, r11, CC_R27
+ lwi r26, r11, CC_R26
+ lwi r25, r11, CC_R25
+ lwi r24, r11, CC_R24
+ lwi r23, r11, CC_R23
+ lwi r22, r11, CC_R22
+ lwi r21, r11, CC_R21
+ lwi r20, r11, CC_R20
+ lwi r19, r11, CC_R19
+ /* dedicated registers */
+ lwi r18, r11, CC_R18
+ lwi r17, r11, CC_R17
+ lwi r16, r11, CC_R16
+ lwi r15, r11, CC_R15
+ lwi r14, r11, CC_R14
+ lwi r13, r11, CC_R13
+ /* skip volatile registers */
+ lwi r2, r11, CC_R2
+ lwi r1, r11, CC_R1
+
+ /* special purpose registers */
+ lwi r12, r11, CC_FSR
+ mts rfsr, r12
+ nop
+ lwi r12, r11, CC_MSR
+ mts rmsr, r12
+ nop
+
+ rtsd r15, 8
+ nop
+
+ENTRY(_reset)
+ brai 0x70; /* Jump back to FS-boot */
+
+ENTRY(_break)
+ mfs r5, rmsr
+ nop
+ swi r5, r0, 0x250 + TOPHYS(r0_ram)
+ mfs r5, resr
+ nop
+ swi r5, r0, 0x254 + TOPHYS(r0_ram)
+ bri 0
+
+ /* These are compiled and loaded into high memory, then
+ * copied into place in mach_early_setup */
+ .section .init.ivt, "ax"
+ .org 0x0
+ /* this is very important - here is the reset vector */
+ /* in the current MMU branch you don't care what is here - it is
+ * used from the bootloader side - but this is correct for FS-BOOT */
+ brai 0x70
+ nop
+ brai TOPHYS(_user_exception); /* syscall handler */
+ brai TOPHYS(_interrupt); /* Interrupt handler */
+ brai TOPHYS(_break); /* nmi trap handler */
+ brai TOPHYS(_hw_exception_handler); /* HW exception handler */
+
+ .org 0x60
+ brai TOPHYS(_debug_exception); /* debug trap handler*/
+
+.section .rodata,"a"
+#include "syscall_table.S"
+
+syscall_table_size=(.-sys_call_table)
+
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 4a8a4064c7e..0cb64a31e89 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -21,9 +21,9 @@
#include <asm/exceptions.h>
#include <asm/entry.h> /* For KM CPU var */
-#include <asm/uaccess.h>
-#include <asm/errno.h>
-#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
#include <asm/current.h>
#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
@@ -31,7 +31,7 @@
#define MICROBLAZE_DBUS_EXCEPTION 0x04
#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
#define MICROBLAZE_FPU_EXCEPTION 0x06
-#define MICROBLAZE_PRIVILEG_EXCEPTION 0x07
+#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
static DEFINE_SPINLOCK(die_lock);
@@ -66,6 +66,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
int fsr, int addr)
{
+#ifdef CONFIG_MMU
+ int code;
+ addr = regs->pc;
+#endif
+
#if 0
printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
type, user_mode(regs) ? "user" : "kernel", fsr,
@@ -74,7 +79,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
switch (type & 0x1F) {
case MICROBLAZE_ILL_OPCODE_EXCEPTION:
- _exception(SIGILL, regs, ILL_ILLOPC, addr);
+ if (user_mode(regs)) {
+ printk(KERN_WARNING "Illegal opcode exception in user mode.\n");
+ _exception(SIGILL, regs, ILL_ILLOPC, addr);
+ return;
+ }
+ printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n");
+ die("opcode exception", regs, SIGBUS);
break;
case MICROBLAZE_IBUS_EXCEPTION:
if (user_mode(regs)) {
@@ -95,11 +106,16 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DIV_ZERO_EXCEPTION:
- printk(KERN_WARNING "Divide by zero exception\n");
- _exception(SIGILL, regs, ILL_ILLOPC, addr);
+ if (user_mode(regs)) {
+ printk(KERN_WARNING "Divide by zero exception in user mode\n");
+ _exception(SIGILL, regs, ILL_ILLOPC, addr);
+ return;
+ }
+ printk(KERN_WARNING "Divide by zero exception in kernel mode.\n");
+ die("Divide by exception", regs, SIGBUS);
break;
-
case MICROBLAZE_FPU_EXCEPTION:
+ printk(KERN_WARNING "FPU exception\n");
/* IEEE FP exception */
/* I removed fsr variable and use code var for storing fsr */
if (fsr & FSR_IO)
@@ -115,7 +131,20 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
_exception(SIGFPE, regs, fsr, addr);
break;
+#ifdef CONFIG_MMU
+ case MICROBLAZE_PRIVILEGED_EXCEPTION:
+ printk(KERN_WARNING "Privileged exception\n");
+ /* "brk r0,r0" - used as debug breakpoint */
+ if (get_user(code, (unsigned long *)regs->pc) == 0
+ && code == 0x980c0000) {
+ _exception(SIGTRAP, regs, TRAP_BRKPT, addr);
+ } else {
+ _exception(SIGILL, regs, ILL_PRVOPC, addr);
+ }
+ break;
+#endif
default:
+ /* FIXME what to do on an unexpected exception */
printk(KERN_WARNING "Unexpected exception %02x "
"PC=%08x in %s mode\n", type, (unsigned int) addr,
kernel_mode(regs) ? "kernel" : "user");
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 319dc35fc92..e568d6ec621 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -3,6 +3,26 @@
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
+ * MMU code derived from arch/ppc/kernel/head_4xx.S:
+ * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ * Initial PowerPC version.
+ * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Rewritten for PReP
+ * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ * Low-level exception handlers, MMU support, and rewrite.
+ * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ * PowerPC 8xx modifications.
+ * Copyright (c) 1998-1999 TiVo, Inc.
+ * PowerPC 403GCX modifications.
+ * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ * PowerPC 403GCX/405GP modifications.
+ * Copyright 2000 MontaVista Software Inc.
+ * PPC405 modifications
+ * PowerPC 403GCX/405GP modifications.
+ * Author: MontaVista Software, Inc.
+ * frank_rowand@mvista.com or source@mvista.com
+ * debbie_chu@mvista.com
+ *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -12,6 +32,22 @@
#include <asm/thread_info.h>
#include <asm/page.h>
+#ifdef CONFIG_MMU
+#include <asm/setup.h> /* COMMAND_LINE_SIZE */
+#include <asm/mmu.h>
+#include <asm/processor.h>
+
+.data
+.global empty_zero_page
+.align 12
+empty_zero_page:
+ .space 4096
+.global swapper_pg_dir
+swapper_pg_dir:
+ .space 4096
+
+#endif /* CONFIG_MMU */
+
.text
ENTRY(_start)
mfs r1, rmsr
@@ -32,6 +68,123 @@ _copy_fdt:
addik r3, r3, -4 /* decrement loop */
no_fdt_arg:
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_CMDLINE_BOOL
+/*
+ * handle the command line:
+ * copy it to __init_end, where there is space reserved for storing it.
+ */
+ or r6, r0, r0 /* increment */
+ ori r4, r0, __init_end /* load address of command line */
+ tophys(r4,r4) /* convert to phys address */
+ ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
+_copy_command_line:
+ lbu r7, r5, r6 /* r7 = mem[r5+r6]; r5 points to the command line */
+ sb r7, r4, r6 /* addr[r4+r6]= r7*/
+ addik r6, r6, 1 /* increment counter */
+ bgtid r3, _copy_command_line /* loop for all entries */
+ addik r3, r3, -1 /* decrement loop */
+ addik r5, r4, 0 /* point r5 at the new command line location */
+ tovirt(r5,r5)
+#endif /* CONFIG_CMDLINE_BOOL */
+
+#ifdef NOT_COMPILE
+/* save bram context */
+ or r6, r0, r0 /* increment */
+ ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
+ ori r3, r0, (LMB_SIZE - 4)
+_copy_bram:
+ lw r7, r0, r6 /* r7 = r0 + r6 */
+ sw r7, r4, r6 /* addr[r4 + r6] = r7*/
+ addik r6, r6, 4 /* increment counter */
+ bgtid r3, _copy_bram /* loop for all entries */
+ addik r3, r3, -4 /* decrement loop */
+#endif
+ /* We have to turn on the MMU right away. */
+
+ /*
+ * Set up the initial MMU state so we can do the first level of
+ * kernel initialization. This maps the first 16 MBytes of memory 1:1
+ * virtual to physical.
+ */
+ nop
+ addik r3, r0, 63 /* Invalidate all TLB entries */
+_invalidate:
+ mts rtlbx, r3
+ mts rtlbhi, r0 /* flush: ensure V is clear */
+ bgtid r3, _invalidate /* loop for all entries */
+ addik r3, r3, -1
+ /* sync */
+
+ /*
+ * We should still be executing code at physical address area
+ * RAM_BASEADDR at this point. However, kernel code is at
+ * a virtual address. So, set up a TLB mapping to cover this once
+ * translation is enabled.
+ */
+
+ addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
+ tophys(r4,r3) /* Load the kernel physical address */
+
+ mts rpid,r0 /* Load the kernel PID */
+ nop
+ bri 4
+
+ /*
+ * Configure and load two entries into TLB slots 0 and 1.
+ * In case we are pinning TLBs, these are reserved in by the
+ * other TLB functions. If not reserving, then it doesn't
+ * matter where they are loaded.
+ */
+ andi r4,r4,0xfffffc00 /* Mask off the real page number */
+ ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
+
+ andi r3,r3,0xfffffc00 /* Mask off the effective page number */
+ ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
+
+ mts rtlbx,r0 /* TLB slot 0 */
+
+ mts rtlblo,r4 /* Load the data portion of the entry */
+ mts rtlbhi,r3 /* Load the tag portion of the entry */
+
+ addik r4, r4, 0x01000000 /* Map next 16 M entries */
+ addik r3, r3, 0x01000000
+
+ ori r6,r0,1 /* TLB slot 1 */
+ mts rtlbx,r6
+
+ mts rtlblo,r4 /* Load the data portion of the entry */
+ mts rtlbhi,r3 /* Load the tag portion of the entry */
+
+ /*
+ * Load a TLB entry for LMB, since we need access to
+ * the exception vectors, using a 4k real==virtual mapping.
+ */
+ ori r6,r0,3 /* TLB slot 3 */
+ mts rtlbx,r6
+
+ ori r4,r0,(TLB_WR | TLB_EX)
+ ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+ mts rtlblo,r4 /* Load the data portion of the entry */
+ mts rtlbhi,r3 /* Load the tag portion of the entry */
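+ /*
+ * Boot-time TLB sketch after the loads above (virt -> phys, where
+ * phys is the tophys() translation of the kernel virtual base):
+ * slot 0: CONFIG_KERNEL_START -> kernel phys base, 16M, WR|EX
+ * slot 1: CONFIG_KERNEL_START + 16M -> phys base + 16M, 16M, WR|EX
+ * slot 3: virt 0 == phys 0, 4K, WR|EX (LMB, exception vectors)
+ */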
+
+ /*
+ * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
+ * caches ready to work.
+ */
+turn_on_mmu:
+ ori r15,r0,start_here
+ ori r4,r0,MSR_KERNEL_VMS
+ mts rmsr,r4
+ nop
+ rted r15,0 /* enables MMU */
+ nop
+
+start_here:
+#endif /* CONFIG_MMU */
+
/* Initialize small data anchors */
la r13, r0, _KERNEL_SDA_BASE_
la r2, r0, _KERNEL_SDA2_BASE_
@@ -51,6 +204,43 @@ no_fdt_arg:
brald r15, r8
nop
+#ifndef CONFIG_MMU
la r15, r0, machine_halt
braid start_kernel
nop
+#else
+ /*
+ * Initialize the MMU.
+ */
+ bralid r15, mmu_init
+ nop
+
+ /* Go back to running unmapped so we can load up new values
+ * and change to using our exception vectors.
+ * On the MicroBlaze, we just invalidate the used TLB entries to clear
+ * the old 16M byte TLB mappings.
+ */
+ ori r15,r0,TOPHYS(kernel_load_context)
+ ori r4,r0,MSR_KERNEL
+ mts rmsr,r4
+ nop
+ bri 4
+ rted r15,0
+ nop
+
+ /* Load up the kernel context */
+kernel_load_context:
+ # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
+ ori r5,r0,3
+ mts rtlbx,r5
+ nop
+ mts rtlbhi,r0
+ nop
+ addi r15, r0, machine_halt
+ ori r17, r0, start_kernel
+ ori r4, r0, MSR_KERNEL_VMS
+ mts rmsr, r4
+ nop
+ rted r17, 0 /* enable MMU and jump to start_kernel */
+ nop
+#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index cf9486d9983..9d591cd74fc 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -53,6 +53,12 @@
* - Illegal instruction opcode
* - Divide-by-zero
*
+ * - Privileged instruction exception (MMU)
+ * - Data storage exception (MMU)
+ * - Instruction storage exception (MMU)
+ * - Data TLB miss exception (MMU)
+ * - Instruction TLB miss exception (MMU)
+ *
* Note we disable interrupts during exception handling, otherwise we will
* possibly get multiple re-entrancy if interrupt handlers themselves cause
* exceptions. JW
@@ -71,9 +77,24 @@
#include <asm/asm-offsets.h>
/* Helpful Macros */
+#ifndef CONFIG_MMU
#define EX_HANDLER_STACK_SIZ (4*19)
+#endif
#define NUM_TO_REG(num) r ## num
+#ifdef CONFIG_MMU
+/* FIXME you can't change the first load of MSR because there is
+ * a hardcoded jump (bri 4) */
+ #define RESTORE_STATE \
+ lwi r3, r1, PT_R3; \
+ lwi r4, r1, PT_R4; \
+ lwi r5, r1, PT_R5; \
+ lwi r6, r1, PT_R6; \
+ lwi r11, r1, PT_R11; \
+ lwi r31, r1, PT_R31; \
+ lwi r1, r0, TOPHYS(r0_ram + 0);
+#endif /* CONFIG_MMU */
+
#define LWREG_NOP \
bri ex_handler_unhandled; \
nop;
@@ -106,6 +127,54 @@
or r3, r0, NUM_TO_REG (regnum); \
bri ex_sw_tail;
+#ifdef CONFIG_MMU
+ #define R3_TO_LWREG_VM_V(regnum) \
+ brid ex_lw_end_vm; \
+ swi r3, r7, 4 * regnum;
+
+ #define R3_TO_LWREG_VM(regnum) \
+ brid ex_lw_end_vm; \
+ or NUM_TO_REG (regnum), r0, r3;
+
+ #define SWREG_TO_R3_VM_V(regnum) \
+ brid ex_sw_tail_vm; \
+ lwi r3, r7, 4 * regnum;
+
+ #define SWREG_TO_R3_VM(regnum) \
+ brid ex_sw_tail_vm; \
+ or r3, r0, NUM_TO_REG (regnum);
+
+ /* Shift right instruction depending on available configuration */
+ #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
+ #define BSRLI(rD, rA, imm) \
+ bsrli rD, rA, imm
+ #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
+ #define BSRLI(rD, rA, imm) \
+ ori rD, r0, (1 << imm); \
+ idivu rD, rD, rA
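+ /* this works because rA >> imm == rA / (1 << imm) for unsigned
+ * values; note idivu here computes rD = rA / rD - the MicroBlaze
+ * divide takes the divisor as the first source operand */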
+ #else
+ #define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
+ /* Only the used shift constants defined here - add more if needed */
+ #define BSRLI2(rD, rA) \
+ srl rD, rA; /* >> 1 */ \
+ srl rD, rD; /* >> 2 */
+ #define BSRLI10(rD, rA) \
+ srl rD, rA; /* >> 1 */ \
+ srl rD, rD; /* >> 2 */ \
+ srl rD, rD; /* >> 3 */ \
+ srl rD, rD; /* >> 4 */ \
+ srl rD, rD; /* >> 5 */ \
+ srl rD, rD; /* >> 6 */ \
+ srl rD, rD; /* >> 7 */ \
+ srl rD, rD; /* >> 8 */ \
+ srl rD, rD; /* >> 9 */ \
+ srl rD, rD /* >> 10 */
+ #define BSRLI20(rD, rA) \
+ BSRLI10(rD, rA); \
+ BSRLI10(rD, rD)
+ #endif
+#endif /* CONFIG_MMU */
+
.extern other_exception_handler /* Defined in exception.c */
/*
@@ -163,34 +232,119 @@
/* wrappers to restore state before coming to entry.S */
+#ifdef CONFIG_MMU
+.section .rodata
+.align 4
+_MB_HW_ExceptionVectorTable:
+/* 0 - Undefined */
+ .long TOPHYS(ex_handler_unhandled)
+/* 1 - Unaligned data access exception */
+ .long TOPHYS(handle_unaligned_ex)
+/* 2 - Illegal op-code exception */
+ .long TOPHYS(full_exception_trapw)
+/* 3 - Instruction bus error exception */
+ .long TOPHYS(full_exception_trapw)
+/* 4 - Data bus error exception */
+ .long TOPHYS(full_exception_trapw)
+/* 5 - Divide by zero exception */
+ .long TOPHYS(full_exception_trapw)
+/* 6 - Floating point unit exception */
+ .long TOPHYS(full_exception_trapw)
+/* 7 - Privileged instruction exception */
+ .long TOPHYS(full_exception_trapw)
+/* 8 - 15 - Undefined */
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+/* 16 - Data storage exception */
+ .long TOPHYS(handle_data_storage_exception)
+/* 17 - Instruction storage exception */
+ .long TOPHYS(handle_instruction_storage_exception)
+/* 18 - Data TLB miss exception */
+ .long TOPHYS(handle_data_tlb_miss_exception)
+/* 19 - Instruction TLB miss exception */
+ .long TOPHYS(handle_instruction_tlb_miss_exception)
+/* 20 - 31 - Undefined */
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+#endif
+
.global _hw_exception_handler
.section .text
.align 4
.ent _hw_exception_handler
_hw_exception_handler:
+#ifndef CONFIG_MMU
addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
+#else
+ swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */
+ /* Save data to kernel memory. This is a problem
+ * when we come from user space */
+ ori r1, r0, TOPHYS(r0_ram + 28);
+#endif
swi r3, r1, PT_R3
swi r4, r1, PT_R4
swi r5, r1, PT_R5
swi r6, r1, PT_R6
- mfs r5, rmsr;
- nop
- swi r5, r1, 0;
- mfs r4, rbtr /* Save BTR before jumping to handler */
- nop
+#ifdef CONFIG_MMU
+ swi r11, r1, PT_R11
+ swi r31, r1, PT_R31
+ lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
+#endif
+
mfs r3, resr
nop
+ mfs r4, rear;
+ nop
+#ifndef CONFIG_MMU
andi r5, r3, 0x1000; /* Check ESR[DS] */
beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
not_in_delay_slot:
swi r17, r1, PT_R17
+#endif
andi r5, r3, 0x1F; /* Extract ESR[EXC] */
+#ifdef CONFIG_MMU
+ /* Calculate exception vector offset = r5 << 2 */
+ addk r6, r5, r5; /* << 1 */
+ addk r6, r6, r6; /* << 2 */
+
+/* count which exceptions happen */
+ lwi r5, r0, 0x200 + TOPHYS(r0_ram)
+ addi r5, r5, 1
+ swi r5, r0, 0x200 + TOPHYS(r0_ram)
+ lwi r5, r6, 0x200 + TOPHYS(r0_ram)
+ addi r5, r5, 1
+ swi r5, r6, 0x200 + TOPHYS(r0_ram)
+/* end */
+ /* Load the HW Exception vector */
+ lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
+ bra r6
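+/* e.g. ESR[EXC] = 18 (data TLB miss) gives r6 = 72, the byte offset
+ * of handle_data_tlb_miss_exception in the vector table above */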
+
+full_exception_trapw:
+ RESTORE_STATE
+ bri full_exception_trap
+#else
/* Exceptions enabled here. This will allow nested exceptions */
mfs r6, rmsr;
nop
@@ -254,6 +408,7 @@ handle_other_ex: /* Handle Other exceptions here */
lwi r18, r1, PT_R18
bri ex_handler_done; /* Complete exception handling */
+#endif
/* 0x01 - Unaligned data access exception
* This occurs when a word access is not aligned on a word boundary,
@@ -265,11 +420,28 @@ handle_other_ex: /* Handle Other exceptions here */
handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
- * R4 = BTR
+ * R4 = EAR
*/
- mfs r4, rear;
+#ifdef CONFIG_MMU
+ andi r6, r3, 0x1000 /* Check ESR[DS] */
+ beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
+ mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
+_no_delayslot:
+#endif
+
+#ifdef CONFIG_MMU
+ /* Check if unaligned address is last on a 4k page */
+ andi r5, r4, 0xffc
+ xori r5, r5, 0xffc
+ bnei r5, _unaligned_ex2
+ _unaligned_ex1:
+ RESTORE_STATE;
+/* Another page must be accessed or physical address not in page table */
+ bri unaligned_data_trap
+ _unaligned_ex2:
+#endif
andi r6, r3, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
@@ -278,6 +450,45 @@ handle_unaligned_ex:
srl r6, r6;
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
+#ifdef CONFIG_MMU
+ /* Get physical address */
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ ori r5, r0, CONFIG_KERNEL_START
+ cmpu r5, r4, r5
+ bgti r5, _unaligned_ex3
+ ori r5, r0, swapper_pg_dir
+ bri _unaligned_ex4
+
+ /* Get the PGD for the current thread. */
+_unaligned_ex3: /* user thread */
+ addi r5, CURRENT_TASK, TOPHYS(0); /* get current task address */
+ lwi r5, r5, TASK_THREAD + PGDIR
+_unaligned_ex4:
+ tophys(r5,r5)
+ BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
+ andi r6, r6, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
+ or r5, r5, r6
+ lwi r6, r5, 0 /* Get L1 entry */
+ andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
+ beqi r5, _unaligned_ex1 /* Bail if no table */
+
+ tophys(r5,r5)
+ BSRLI(r6,r4,10) /* Compute PTE address */
+ andi r6, r6, 0xffc
+ andi r5, r5, 0xfffff003
+ or r5, r5, r6
+ lwi r5, r5, 0 /* Get Linux PTE */
+
+ andi r6, r5, _PAGE_PRESENT
+ beqi r6, _unaligned_ex1 /* Bail if no page */
+
+ andi r5, r5, 0xfffff000 /* Extract RPN */
+ andi r4, r4, 0x00000fff /* Extract offset */
+ or r4, r4, r5 /* Create physical address */
+#endif /* CONFIG_MMU */
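+ /* The walk above as a C sketch (illustrative only, not part of the
+ * generated code), using the same masks for 4K pages and the EAR
+ * in 'ear':
+ *	pgd  = (ear >= CONFIG_KERNEL_START) ? swapper_pg_dir
+ *					    : current->thread.pgdir;
+ *	l1   = *(u32 *)(pgd + ((ear >> 20) & 0xffc));
+ *	pte  = *(u32 *)((l1 & 0xfffff000) + ((ear >> 10) & 0xffc));
+ *	phys = (pte & 0xfffff000) | (ear & 0xfff);
+ */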
andi r6, r3, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
@@ -355,6 +566,7 @@ ex_shw:
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
+#ifndef CONFIG_MMU
lwi r5, r1, 0 /* RMSR */
mts rmsr, r5
nop
@@ -366,13 +578,455 @@ ex_handler_done:
rted r17, 0
addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
+#else
+ RESTORE_STATE;
+ rted r17, 0
+ nop
+#endif
+
+#ifdef CONFIG_MMU
+ /* Exception vector entry code. This code runs with address translation
+ * turned off (i.e. using physical addresses). */
+
+ /* Exception vectors. */
+
+ /* 0x10 - Data Storage Exception
+ * This happens for just a few reasons. U0 set (but we don't do that),
+ * or zone protection fault (user violation, write to protected page).
+ * If this is just an update of modified status, we do that quickly
+ * and exit. Otherwise, we call heavyweight functions to do the work.
+ */
+ handle_data_storage_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = ESR
+ */
+ mfs r11, rpid
+ nop
+ bri 4
+ mfs r3, rear /* Get faulting address */
+ nop
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ ori r4, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r4
+ bgti r4, ex3
+ /* First, check if it was a zone fault (which means a user
+ * tried to access a kernel or read-protected page - always
+ * a SEGV). All other faults here must be stores, so no
+ * need to check ESR_S as well. */
+ mfs r4, resr
+ nop
+ andi r4, r4, 0x800 /* ESR_Z - zone protection */
+ bnei r4, ex2
+
+ ori r4, r0, swapper_pg_dir
+ mts rpid, r0 /* TLB will have 0 TID */
+ nop
+ bri ex4
+
+ /* Get the PGD for the current thread. */
+ ex3:
+ /* First, check if it was a zone fault (which means a user
+ * tried to access a kernel or read-protected page - always
+ * a SEGV). All other faults here must be stores, so no
+ * need to check ESR_S as well. */
+ mfs r4, resr
+ nop
+ andi r4, r4, 0x800 /* ESR_Z */
+ bnei r4, ex2
+ /* get current task address */
+ addi r4, CURRENT_TASK, TOPHYS(0);
+ lwi r4, r4, TASK_THREAD+PGDIR
+ ex4:
+ tophys(r4,r4)
+ BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
+ andi r5, r5, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+ or r4, r4, r5
+ lwi r4, r4, 0 /* Get L1 entry */
+ andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ beqi r5, ex2 /* Bail if no table */
+
+ tophys(r5,r5)
+ BSRLI(r6,r3,10) /* Compute PTE address */
+ andi r6, r6, 0xffc
+ andi r5, r5, 0xfffff003
+ or r5, r5, r6
+ lwi r4, r5, 0 /* Get Linux PTE */
+
+ andi r6, r4, _PAGE_RW /* Is it writeable? */
+ beqi r6, ex2 /* Bail if not */
+
+ /* Update 'changed' */
+ ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+ swi r4, r5, 0 /* Update Linux page table */
+
+ /* Most of the Linux PTE is ready to load into the TLB LO.
+ * We set ZSEL, where only the LS-bit determines user access.
+ * We set execute, because we don't have the granularity to
+ * properly set this at the page level (Linux problem).
+ * If shared is set, we cause a zero PID->TID load.
+ * Many of these bits are software only. Bits we don't set
+ * here we (properly should) assume have the appropriate value.
+ */
+ andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+ ori r4, r4, _PAGE_HWEXEC /* make it executable */
+
+ /* find the TLB index for the faulting address; the translation must
+ * be present, since this was a protection fault, not a TLB miss */
+ mts rtlbsx, r3
+ nop
+ mfs r5, rtlbx /* DEBUG: TBD */
+ nop
+ mts rtlblo, r4 /* Load TLB LO */
+ nop
+ /* Will sync shadow TLBs */
+
+ /* Done...restore registers and get out of here. */
+ mts rpid, r11
+ nop
+ bri 4
+
+ RESTORE_STATE;
+ rted r17, 0
+ nop
+ ex2:
+ /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out. */
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ bri page_fault_data_trap
+
+
+ /* 0x11 - Instruction Storage Exception
+ * This is caused by a fetch from non-execute or guarded pages. */
+ handle_instruction_storage_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = ESR
+ */
+
+ mfs r3, rear /* Get faulting address */
+ nop
+ RESTORE_STATE;
+ bri page_fault_instr_trap
+
+ /* 0x12 - Data TLB Miss Exception
+ * As the name implies, translation is not in the MMU, so search the
+ * page tables and fix it. The only purpose of this function is to
+ * load TLB entries from the page table if they exist.
+ */
+ handle_data_tlb_miss_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = ESR
+ */
+ mfs r11, rpid
+ nop
+ bri 4
+ mfs r3, rear /* Get faulting address */
+ nop
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables. */
+ ori r4, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r4
+ bgti r4, ex5
+ ori r4, r0, swapper_pg_dir
+ mts rpid, r0 /* TLB will have 0 TID */
+ nop
+ bri ex6
+ /* Get the PGD for the current thread. */
+ ex5:
+ /* get current task address */
+ addi r4, CURRENT_TASK, TOPHYS(0);
+ lwi r4, r4, TASK_THREAD+PGDIR
+ ex6:
+ tophys(r4,r4)
+ BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
+ andi r5, r5, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+ or r4, r4, r5
+ lwi r4, r4, 0 /* Get L1 entry */
+ andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ beqi r5, ex7 /* Bail if no table */
+
+ tophys(r5,r5)
+ BSRLI(r6,r3,10) /* Compute PTE address */
+ andi r6, r6, 0xffc
+ andi r5, r5, 0xfffff003
+ or r5, r5, r6
+ lwi r4, r5, 0 /* Get Linux PTE */
+
+ andi r6, r4, _PAGE_PRESENT
+ beqi r6, ex7
+
+ ori r4, r4, _PAGE_ACCESSED
+ swi r4, r5, 0
+
+ /* Most of the Linux PTE is ready to load into the TLB LO.
+ * We set ZSEL, where only the LS-bit determines user access.
+ * We set execute, because we don't have the granularity to
+ * properly set this at the page level (Linux problem).
+ * If shared is set, we cause a zero PID->TID load.
+ * Many of these bits are software only. Bits we don't set
+ * here we (properly should) assume have the appropriate value.
+ */
+ andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+
+ bri finish_tlb_load
+ ex7:
+ /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ bri page_fault_data_trap
+
+ /* 0x13 - Instruction TLB Miss Exception
+ * Nearly the same as above, except we get our information from
+ * different registers and bailout to a different point.
+ */
+ handle_instruction_tlb_miss_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = ESR
+ */
+ mfs r11, rpid
+ nop
+ bri 4
+ mfs r3, rear /* Get faulting address */
+ nop
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ ori r4, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r4
+ bgti r4, ex8
+ ori r4, r0, swapper_pg_dir
+ mts rpid, r0 /* TLB will have 0 TID */
+ nop
+ bri ex9
+
+ /* Get the PGD for the current thread. */
+ ex8:
+ /* get current task address */
+ addi r4, CURRENT_TASK, TOPHYS(0);
+ lwi r4, r4, TASK_THREAD+PGDIR
+ ex9:
+ tophys(r4,r4)
+ BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
+ andi r5, r5, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+ or r4, r4, r5
+ lwi r4, r4, 0 /* Get L1 entry */
+ andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ beqi r5, ex10 /* Bail if no table */
+
+ tophys(r5,r5)
+ BSRLI(r6,r3,10) /* Compute PTE address */
+ andi r6, r6, 0xffc
+ andi r5, r5, 0xfffff003
+ or r5, r5, r6
+ lwi r4, r5, 0 /* Get Linux PTE */
+
+ andi r6, r4, _PAGE_PRESENT
+ beqi r6, ex10
+
+ ori r4, r4, _PAGE_ACCESSED
+ swi r4, r5, 0
+
+ /* Most of the Linux PTE is ready to load into the TLB LO.
+ * We set ZSEL, where only the LS-bit determines user access.
+ * We set execute, because we don't have the granularity to
+ * properly set this at the page level (Linux problem).
+ * If shared is set, we cause a zero PID->TID load.
+ * Many of these bits are software only. Bits we don't set
+ * here we (properly should) assume have the appropriate value.
+ */
+ andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+
+ bri finish_tlb_load
+ ex10:
+ /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ bri page_fault_instr_trap
+
+/* Both the instruction and data TLB miss get to this point to load the TLB.
+ * r3 - EA of fault
+ * r4 - TLB LO (info from Linux PTE)
+ * r5, r6 - available to use
+ * PID - loaded with proper value when we get here
+ * Upon exit, we reload everything and RFI.
+ * A common place to load the TLB.
+ */
+ tlb_index:
+ .long 1 /* MS: storing last used tlb index */
+ finish_tlb_load:
+ /* MS: load the last used TLB index. */
+ lwi r5, r0, TOPHYS(tlb_index)
+ addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
+
+/* MS: FIXME this is a potential bug: MICROBLAZE_TLB_SIZE-1 is a mask, not a count */
+ andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
+ ori r6, r0, 1
+ cmp r31, r5, r6 /* MS: r31 is negative if index > 1 */
+ blti r31, sem /* MS: index already past the pinned entries */
+ addik r5, r6, 1 /* MS: else force index to 2, skipping pinned 0 and 1 */
+ sem:
+ /* MS: save back current TLB index */
+ swi r5, r0, TOPHYS(tlb_index)
+
+ ori r4, r4, _PAGE_HWEXEC /* make it executable */
+ mts rtlbx, r5 /* MS: save current TLB */
+ nop
+ mts rtlblo, r4 /* MS: save to TLB LO */
+ nop
+
+ /* Create EPN. This is the faulting address plus a static
+ * set of bits. These are size, valid, E, U0, and ensure
+ * bits 20 and 21 are zero.
+ */
+ andi r3, r3, 0xfffff000
+ ori r3, r3, 0x0c0
+ mts rtlbhi, r3 /* Load TLB HI */
+ nop
+
+ /* Done...restore registers and get out of here. */
+ ex12:
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ rted r17, 0
+ nop
+
+ /* extern void giveup_fpu(struct task_struct *prev)
+ *
+ * The MicroBlaze processor may have an FPU, so this should not just
+ * return: TBD.
+ */
+ .globl giveup_fpu;
+ .align 4;
+ giveup_fpu:
+ bralid r15,0 /* TBD */
+ nop
+
+ /* At present, this routine just hangs. - extern void abort(void) */
+ .globl abort;
+ .align 4;
+ abort:
+ br r0
+
+ .globl set_context;
+ .align 4;
+ set_context:
+ mts rpid, r5 /* Shadow TLBs are automatically */
+ nop
+ bri 4 /* flushed by changing PID */
+ rtsd r15,8
+ nop
+
+#endif
.end _hw_exception_handler
+#ifdef CONFIG_MMU
+/* Unaligned data access exception for the MMU case: entered when the
+ * unaligned address lies last on a 4k page or has no page-table mapping.
+ * When this is called, we are in virtual mode with exceptions enabled
+ * and registers 1-13,15,17,18 saved.
+ *
+ * R3 = ESR
+ * R4 = EAR
+ * R7 = pointer to saved registers (struct pt_regs *regs)
+ *
+ * This handler performs the access and returns via ret_from_exc.
+ */
+.global _unaligned_data_exception
+.ent _unaligned_data_exception
+_unaligned_data_exception:
+ andi r8, r3, 0x3E0; /* Mask and extract the register operand */
+ BSRLI(r8,r8,2); /* r8 >> 2 = register operand * 8 */
+ andi r6, r3, 0x400; /* Extract ESR[S] */
+ bneid r6, ex_sw_vm;
+ andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
+ex_lw_vm:
+ beqid r6, ex_lhw_vm;
+ lbui r5, r4, 0; /* Exception address in r4 - delay slot */
+/* Load a word, byte-by-byte from the unaligned address and save it in tmp space */
+ la r6, r0, ex_tmp_data_loc_0;
+ sbi r5, r6, 0;
+ lbui r5, r4, 1;
+ sbi r5, r6, 1;
+ lbui r5, r4, 2;
+ sbi r5, r6, 2;
+ lbui r5, r4, 3;
+ sbi r5, r6, 3;
+ brid ex_lw_tail_vm;
+/* Get the destination register value into r3 - delay slot */
+ lwi r3, r6, 0;
+ex_lhw_vm:
+ /* Load a half-word, byte-by-byte from the unaligned address and
+ * save it in tmp space */
+ la r6, r0, ex_tmp_data_loc_0;
+ sbi r5, r6, 0;
+ lbui r5, r4, 1;
+ sbi r5, r6, 1;
+ lhui r3, r6, 0; /* Get the destination register value into r3 */
+ex_lw_tail_vm:
+ /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
+ addik r5, r8, lw_table_vm;
+ bra r5;
+ex_lw_end_vm: /* Exception handling of load word, ends */
+ brai ret_from_exc;
+ex_sw_vm:
+/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
+ addik r5, r8, sw_table_vm;
+ bra r5;
+ex_sw_tail_vm:
+ la r5, r0, ex_tmp_data_loc_0;
+ beqid r6, ex_shw_vm;
+ swi r3, r5, 0; /* Get the word - delay slot */
+ /* Store the word, byte-by-byte into destination address */
+ lbui r3, r5, 0;
+ sbi r3, r4, 0;
+ lbui r3, r5, 1;
+ sbi r3, r4, 1;
+ lbui r3, r5, 2;
+ sbi r3, r4, 2;
+ lbui r3, r5, 3;
+ brid ret_from_exc;
+ sbi r3, r4, 3; /* Delay slot */
+ex_shw_vm:
+ /* Store the lower half-word, byte-by-byte into destination address */
+ lbui r3, r5, 2;
+ sbi r3, r4, 0;
+ lbui r3, r5, 3;
+ brid ret_from_exc;
+ sbi r3, r4, 1; /* Delay slot */
+ex_sw_end_vm: /* Exception handling of store word, ends. */
+.end _unaligned_data_exception
+#endif /* CONFIG_MMU */
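In C terms, the ex_lw_vm/ex_sw_vm paths above reduce to the following
minimal sketch (helper names are illustrative, not kernel API; MicroBlaze
is big-endian, and the jump tables stand in for the register argument):

/* Touch the unaligned address one byte at a time, so that no aligned
 * word or half-word access is ever issued against it. */
static unsigned long unaligned_load(const unsigned char *ea, int word)
{
	if (!word)			/* ESR[W] clear: 16-bit access */
		return (ea[0] << 8) | ea[1];
	return ((unsigned long)ea[0] << 24) | (ea[1] << 16) |
	       (ea[2] << 8) | ea[3];
}

static void unaligned_store(unsigned char *ea, unsigned long v, int word)
{
	if (!word) {			/* store the low half-word */
		ea[0] = v >> 8;
		ea[1] = v;
		return;
	}
	ea[0] = v >> 24;
	ea[1] = v >> 16;
	ea[2] = v >> 8;
	ea[3] = v;
}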
+
ex_handler_unhandled:
/* FIXME add handle function for unhandled exception - dump register */
bri 0
+/*
+ * hw_exception_handler Jump Table
+ * - Contains code snippets for each register that caused the unaligned exception
+ * - Hence exception handler is NOT self-modifying
+ * - Separate table for load exceptions and store exceptions.
+ * - Each table is of size: (8 * 32) = 256 bytes
+ */
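The dispatch arithmetic behind these tables, restated as a hypothetical C
helper: bits 5..9 of ESR (mask 0x3E0) carry the register operand, and each
slot holds two instructions (8 bytes), so no indexing code is needed:

static unsigned long jump_slot(unsigned long esr, unsigned long table_base)
{
	unsigned long rnum = (esr & 0x3E0) >> 5;	/* register operand */
	return table_base + 8 * rnum;			/* bra target in table */
}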
+
.section .text
.align 4
lw_table:
@@ -407,7 +1061,11 @@ lw_r27: R3_TO_LWREG (27);
lw_r28: R3_TO_LWREG (28);
lw_r29: R3_TO_LWREG (29);
lw_r30: R3_TO_LWREG (30);
+#ifdef CONFIG_MMU
+lw_r31: R3_TO_LWREG_V (31);
+#else
lw_r31: R3_TO_LWREG (31);
+#endif
sw_table:
sw_r0: SWREG_TO_R3 (0);
@@ -441,7 +1099,81 @@ sw_r27: SWREG_TO_R3 (27);
sw_r28: SWREG_TO_R3 (28);
sw_r29: SWREG_TO_R3 (29);
sw_r30: SWREG_TO_R3 (30);
+#ifdef CONFIG_MMU
+sw_r31: SWREG_TO_R3_V (31);
+#else
sw_r31: SWREG_TO_R3 (31);
+#endif
+
+#ifdef CONFIG_MMU
+lw_table_vm:
+lw_r0_vm: R3_TO_LWREG_VM (0);
+lw_r1_vm: R3_TO_LWREG_VM_V (1);
+lw_r2_vm: R3_TO_LWREG_VM_V (2);
+lw_r3_vm: R3_TO_LWREG_VM_V (3);
+lw_r4_vm: R3_TO_LWREG_VM_V (4);
+lw_r5_vm: R3_TO_LWREG_VM_V (5);
+lw_r6_vm: R3_TO_LWREG_VM_V (6);
+lw_r7_vm: R3_TO_LWREG_VM_V (7);
+lw_r8_vm: R3_TO_LWREG_VM_V (8);
+lw_r9_vm: R3_TO_LWREG_VM_V (9);
+lw_r10_vm: R3_TO_LWREG_VM_V (10);
+lw_r11_vm: R3_TO_LWREG_VM_V (11);
+lw_r12_vm: R3_TO_LWREG_VM_V (12);
+lw_r13_vm: R3_TO_LWREG_VM_V (13);
+lw_r14_vm: R3_TO_LWREG_VM (14);
+lw_r15_vm: R3_TO_LWREG_VM_V (15);
+lw_r16_vm: R3_TO_LWREG_VM (16);
+lw_r17_vm: R3_TO_LWREG_VM_V (17);
+lw_r18_vm: R3_TO_LWREG_VM_V (18);
+lw_r19_vm: R3_TO_LWREG_VM (19);
+lw_r20_vm: R3_TO_LWREG_VM (20);
+lw_r21_vm: R3_TO_LWREG_VM (21);
+lw_r22_vm: R3_TO_LWREG_VM (22);
+lw_r23_vm: R3_TO_LWREG_VM (23);
+lw_r24_vm: R3_TO_LWREG_VM (24);
+lw_r25_vm: R3_TO_LWREG_VM (25);
+lw_r26_vm: R3_TO_LWREG_VM (26);
+lw_r27_vm: R3_TO_LWREG_VM (27);
+lw_r28_vm: R3_TO_LWREG_VM (28);
+lw_r29_vm: R3_TO_LWREG_VM (29);
+lw_r30_vm: R3_TO_LWREG_VM (30);
+lw_r31_vm: R3_TO_LWREG_VM_V (31);
+
+sw_table_vm:
+sw_r0_vm: SWREG_TO_R3_VM (0);
+sw_r1_vm: SWREG_TO_R3_VM_V (1);
+sw_r2_vm: SWREG_TO_R3_VM_V (2);
+sw_r3_vm: SWREG_TO_R3_VM_V (3);
+sw_r4_vm: SWREG_TO_R3_VM_V (4);
+sw_r5_vm: SWREG_TO_R3_VM_V (5);
+sw_r6_vm: SWREG_TO_R3_VM_V (6);
+sw_r7_vm: SWREG_TO_R3_VM_V (7);
+sw_r8_vm: SWREG_TO_R3_VM_V (8);
+sw_r9_vm: SWREG_TO_R3_VM_V (9);
+sw_r10_vm: SWREG_TO_R3_VM_V (10);
+sw_r11_vm: SWREG_TO_R3_VM_V (11);
+sw_r12_vm: SWREG_TO_R3_VM_V (12);
+sw_r13_vm: SWREG_TO_R3_VM_V (13);
+sw_r14_vm: SWREG_TO_R3_VM (14);
+sw_r15_vm: SWREG_TO_R3_VM_V (15);
+sw_r16_vm: SWREG_TO_R3_VM (16);
+sw_r17_vm: SWREG_TO_R3_VM_V (17);
+sw_r18_vm: SWREG_TO_R3_VM_V (18);
+sw_r19_vm: SWREG_TO_R3_VM (19);
+sw_r20_vm: SWREG_TO_R3_VM (20);
+sw_r21_vm: SWREG_TO_R3_VM (21);
+sw_r22_vm: SWREG_TO_R3_VM (22);
+sw_r23_vm: SWREG_TO_R3_VM (23);
+sw_r24_vm: SWREG_TO_R3_VM (24);
+sw_r25_vm: SWREG_TO_R3_VM (25);
+sw_r26_vm: SWREG_TO_R3_VM (26);
+sw_r27_vm: SWREG_TO_R3_VM (27);
+sw_r28_vm: SWREG_TO_R3_VM (28);
+sw_r29_vm: SWREG_TO_R3_VM (29);
+sw_r30_vm: SWREG_TO_R3_VM (30);
+sw_r31_vm: SWREG_TO_R3_VM_V (31);
+#endif /* CONFIG_MMU */
/* Temporary data structures used in the handler */
.section .data
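All three MMU fault paths above (data storage, data TLB miss, instruction
TLB miss) perform the same two-level software walk before touching the TLB.
A compact C model of that walk, under the assumptions the assembly makes
(4K pages, 4K-aligned 1024-entry tables, physical pointers, and the
swapper_pg_dir versus thread-pgdir selection elided; the helper name is
illustrative):

#include <asm/pgtable.h>	/* _PAGE_PRESENT */

/* Returns the physical address for ea, or 0 where the assembly bails
 * out to the heavyweight page-fault handlers. */
static unsigned long mb_walk(unsigned long pgd_phys, unsigned long ea)
{
	unsigned long l1, pte_base, pte;

	/* L1 entry: BSRLI(rX, ea, 20); andi rX, rX, 0xffc */
	l1 = *(unsigned long *)(pgd_phys + ((ea >> 20) & 0xffc));
	pte_base = l1 & 0xfffff000;	/* extract L2 (pte) base address */
	if (!pte_base)
		return 0;		/* bail if no table */

	/* L2 entry: BSRLI(rX, ea, 10); andi rX, rX, 0xffc */
	pte = *(unsigned long *)(pte_base + ((ea >> 10) & 0xffc));
	if (!(pte & _PAGE_PRESENT))
		return 0;		/* bail if no page */

	return (pte & 0xfffff000) | (ea & 0x00000fff);	/* RPN | offset */
}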
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index a69d3e3c2fd..b15605299a5 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -137,8 +137,8 @@ void __init init_IRQ(void)
intr_type =
*(int *) of_get_property(intc, "xlnx,kind-of-intr", NULL);
- if (intr_type >= (1 << nr_irq))
- printk(KERN_INFO " ERROR: Mishmash in king-of-intr param\n");
+ if (intr_type >= (1 << (nr_irq + 1)))
+ printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
#ifdef CONFIG_SELFMOD_INTC
selfmod_function((int *) arr_func, intc_baseaddr);
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 5f71790e3c3..59ff20e33e0 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -45,3 +45,5 @@ extern void __udivsi3(void);
EXPORT_SYMBOL(__udivsi3);
extern void __umodsi3(void);
EXPORT_SYMBOL(__umodsi3);
+extern char *_ebss;
+EXPORT_SYMBOL_GPL(_ebss);
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
new file mode 100644
index 00000000000..df16c6287a8
--- /dev/null
+++ b/arch/microblaze/kernel/misc.S
@@ -0,0 +1,120 @@
+/*
+ * Miscellaneous low-level MMU functions.
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
+ *
+ * Derived from arch/ppc/kernel/misc.S
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <linux/errno.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+ .text
+/*
+ * Flush MMU TLB
+ *
+ * We avoid flushing the pinned 0, 1 and possibly 2 entries.
+ */
+.globl _tlbia;
+.align 4;
+_tlbia:
+ addik r12, r0, 63 /* flush entries 63 down to 2 */
+ /* isync */
+_tlbia_1:
+ mts rtlbx, r12
+ nop
+ mts rtlbhi, r0 /* flush: ensure V is clear */
+ nop
+ addik r11, r12, -2
+ bneid r11, _tlbia_1 /* loop for all entries */
+ addik r12, r12, -1
+ /* sync */
+ rtsd r15, 8
+ nop
+
+/*
+ * Flush MMU TLB for a particular address (in r5)
+ */
+.globl _tlbie;
+.align 4;
+_tlbie:
+ mts rtlbsx, r5 /* look up the address in TLB */
+ nop
+ mfs r12, rtlbx /* Retrieve index */
+ nop
+ blti r12, _tlbie_1 /* Check if found */
+ mts rtlbhi, r0 /* flush: ensure V is clear */
+ nop
+_tlbie_1:
+ rtsd r15, 8
+ nop
+
+/*
+ * Allocate TLB entry for early console
+ */
+.globl early_console_reg_tlb_alloc;
+.align 4;
+early_console_reg_tlb_alloc:
+ /*
+ * Load a TLB entry for the UART, so that early_printk() can use
+ * the UARTs nice and early. We use a 4k real==virtual mapping.
+ */
+ ori r4, r0, 63
+ mts rtlbx, r4 /* TLB slot 63 */
+
+ or r4,r5,r0
+ andi r4,r4,0xfffff000
+ ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
+
+ andi r5,r5,0xfffff000
+ ori r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+ mts rtlblo,r4 /* Load the data portion of the entry */
+ nop
+ mts rtlbhi,r5 /* Load the tag portion of the entry */
+ nop
+ rtsd r15, 8
+ nop
+
+/*
+ * Copy a whole page (4096 bytes).
+ */
+#define COPY_16_BYTES \
+ lwi r7, r6, 0; \
+ lwi r8, r6, 4; \
+ lwi r9, r6, 8; \
+ lwi r10, r6, 12; \
+ swi r7, r5, 0; \
+ swi r8, r5, 4; \
+ swi r9, r5, 8; \
+ swi r10, r5, 12
+
+
+/* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/
+#define DCACHE_LINE_BYTES (4 * 4)
+
+.globl copy_page;
+.align 4;
+copy_page:
+ ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
+_copy_page_loop:
+ COPY_16_BYTES
+#if DCACHE_LINE_BYTES >= 32
+ COPY_16_BYTES
+#endif
+ addik r6, r6, DCACHE_LINE_BYTES
+ addik r5, r5, DCACHE_LINE_BYTES
+ bneid r11, _copy_page_loop
+ addik r11, r11, -1
+ rtsd r15, 8
+ nop
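With DCACHE_LINE_BYTES fixed at 16 above, the countdown runs 256 times
(the bneid delay slot performs the final decrement), moving exactly one
4K page. The same loop as a C sketch, under the same 16-byte-line
assumption:

static void copy_page_model(unsigned long *to, const unsigned long *from)
{
	int n;

	for (n = 4096 / 16; n != 0; n--) {	/* 256 x COPY_16_BYTES */
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
		to[3] = from[3];
		to += 4;
		from += 4;
	}
}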
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 07d4fa339ed..00b12c6d532 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -126,9 +126,54 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
else
childregs->r1 = ((unsigned long) ti) + THREAD_SIZE;
+#ifndef CONFIG_MMU
memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
ti->cpu_context.r1 = (unsigned long)childregs;
ti->cpu_context.msr = (unsigned long)childregs->msr;
+#else
+
+ /* if creating a kernel thread then update the current reg (we don't
+ * want to use the parent's value when restoring by POP_STATE) */
+ if (kernel_mode(regs))
+ /* save new current on stack to use POP_STATE */
+ childregs->CURRENT_TASK = (unsigned long)p;
+ /* if returning to user then use the parent's value of this register */
+
+ /* if we're creating a new kernel thread then just zero all
+ * the registers. That's OK for a brand new thread. */
+ /* Note that some of them will be restored in POP_STATE. */
+ if (kernel_mode(regs))
+ memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
+ /* if this thread is created for fork/vfork/clone, then we want to
+ * restore all the parent's context */
+ /* in addition to the registers which will be restored by POP_STATE */
+ else {
+ ti->cpu_context = *(struct cpu_context *)regs;
+ childregs->msr |= MSR_UMS;
+ }
+
+ /* FIXME STATE_SAVE_PT_OFFSET; */
+ ti->cpu_context.r1 = (unsigned long)childregs - STATE_SAVE_ARG_SPACE;
+ /* childregs is a copy of the parent's regs, saved immediately after
+ * entering kernel state and before enabling VM. This MSR is restored
+ * in switch_to and RETURN(), and the machine state there must have
+ * INTs disabled before, and enabled after, performing rtbd. Compose
+ * the right MSR for RETURN(); it also works for switch_to, except
+ * for VM and UMS. Don't touch UMS, CARRY or the cache bits; right
+ * now MSR is a copy of the parent's. */
+ childregs->msr |= MSR_BIP;
+ childregs->msr &= ~MSR_EIP;
+ childregs->msr |= MSR_IE;
+ childregs->msr &= ~MSR_VM;
+ childregs->msr |= MSR_VMS;
+ childregs->msr |= MSR_EE; /* exceptions will be enabled*/
+
+ ti->cpu_context.msr = (childregs->msr|MSR_VM);
+ ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
+#endif
ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
if (clone_flags & CLONE_SETTLS)
@@ -137,6 +182,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
+#ifndef CONFIG_MMU
/*
* Return saved PC of a blocked thread.
*/
@@ -151,6 +197,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
else
return ctx->r14;
}
+#endif
static void kernel_thread_helper(int (*fn)(void *), void *arg)
{
@@ -173,6 +220,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
&regs, 0, NULL, NULL);
}
+EXPORT_SYMBOL_GPL(kernel_thread);
unsigned long get_wchan(struct task_struct *p)
{
@@ -188,3 +236,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
regs->r1 = usp;
regs->pt_mode = 0;
}
+
+#ifdef CONFIG_MMU
+#include <linux/elfcore.h>
+/*
+ * Dump the FPU context for a core dump; MicroBlaze has none to save
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+{
+ return 0; /* MicroBlaze has no separate FPU registers */
+}
+#endif /* CONFIG_MMU */
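The MSR bit-twiddling in the copy_thread() hunk above is easier to audit
as one flat helper. A sketch only: the MSR_* masks are the kernel's own
from <asm/registers.h>, while the helper itself is illustrative:

#include <asm/registers.h>	/* MSR_* bit masks */

/* Child MSR as composed in copy_thread: interrupts and exceptions come
 * on after rtbd; VM is deferred to the VMS shadow bit. */
static unsigned long child_msr(unsigned long parent_msr)
{
	unsigned long msr = parent_msr;

	msr |= MSR_BIP;			/* break in progress until rtbd */
	msr &= ~(MSR_EIP | MSR_VM);	/* no exception pending; VM off now */
	msr |= MSR_IE | MSR_VMS | MSR_EE; /* INTs, VM after rtbd, exceptions */
	return msr;			/* -> childregs->msr; cpu_context.msr
					 * is then (msr | MSR_VM) & ~MSR_UMS */
}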
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 34c48718061..c005cc6f1aa 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -509,12 +509,13 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
if (prop) {
- initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));
+ initrd_start = (unsigned long)
+ __va((u32)of_read_ulong(prop, l/4));
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
if (prop) {
initrd_end = (unsigned long)
- __va(of_read_ulong(prop, l/4));
+ __va((u32)of_read_ulong(prop, l/4));
initrd_below_start_ok = 1;
} else {
initrd_start = 0;
@@ -563,7 +564,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
#ifdef CONFIG_CMDLINE
+#ifndef CONFIG_CMDLINE_FORCE
if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
+#endif
strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index eb6b41758e2..8709bea0960 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -42,10 +42,6 @@ char cmd_line[COMMAND_LINE_SIZE];
void __init setup_arch(char **cmdline_p)
{
-#ifdef CONFIG_CMDLINE_FORCE
- strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
- strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif
*cmdline_p = cmd_line;
console_verbose();
@@ -102,14 +98,34 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
{
unsigned long *src, *dst = (unsigned long *)0x0;
+ /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the
+ * end of the kernel. There are two positions to check:
+ * first __init_end, then __bss_start.
+ */
+#ifdef CONFIG_MTD_UCLINUX
+ int romfs_size;
+ unsigned int romfs_base;
+ char *old_klimit = klimit;
+
+ romfs_base = (ram ? ram : (unsigned int)&__init_end);
+ romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
+ if (!romfs_size) {
+ romfs_base = (unsigned int)&__bss_start;
+ romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
+ }
+
+ /* Move ROMFS out of BSS before clearing it */
+ if (romfs_size > 0) {
+ memmove(&_ebss, (int *)romfs_base, romfs_size);
+ klimit += romfs_size;
+ }
+#endif
+
/* clearing bss section */
memset(__bss_start, 0, __bss_stop-__bss_start);
memset(_ssbss, 0, _esbss-_ssbss);
- /*
- * Copy command line passed from bootloader, or use default
- * if none provided, or forced
- */
+ /* Copy command line passed from bootloader */
#ifndef CONFIG_CMDLINE_BOOL
if (cmdline && cmdline[0] != '\0')
strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
@@ -126,27 +142,15 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);
#ifdef CONFIG_MTD_UCLINUX
- {
- int size;
- unsigned int romfs_base;
- romfs_base = (ram ? ram : (unsigned int)&__init_end);
- /* if CONFIG_MTD_UCLINUX_EBSS is defined, assume ROMFS is at the
- * end of kernel, which is ROMFS_LOCATION defined above. */
- size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
- early_printk("Found romfs @ 0x%08x (0x%08x)\n",
- romfs_base, size);
- early_printk("#### klimit %p ####\n", klimit);
- BUG_ON(size < 0); /* What else can we do? */
-
- /* Use memmove to handle likely case of memory overlap */
- early_printk("Moving 0x%08x bytes from 0x%08x to 0x%08x\n",
- size, romfs_base, (unsigned)&_ebss);
- memmove(&_ebss, (int *)romfs_base, size);
-
- /* update klimit */
- klimit += PAGE_ALIGN(size);
- early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
- }
+ early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+ romfs_base, romfs_size);
+ early_printk("#### klimit %p ####\n", old_klimit);
+ BUG_ON(romfs_size < 0); /* What else can we do? */
+
+ early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+ romfs_size, romfs_base, (unsigned)&_ebss);
+
+ early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
for (src = __ivt_start; src < __ivt_end; src++, dst++)
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 40d36931e36..4c0e6521b11 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -152,8 +152,8 @@ struct rt_sigframe {
unsigned long tramp[2]; /* signal trampoline */
};
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *rval_p)
+static int restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc, int *rval_p)
{
unsigned int err = 0;
@@ -211,11 +211,10 @@ badframe:
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
- struct rt_sigframe *frame =
- (struct rt_sigframe *)(regs->r1 + STATE_SAVE_ARG_SPACE);
+ struct rt_sigframe __user *frame =
+ (struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE);
sigset_t set;
- stack_t st;
int rval;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -233,11 +232,10 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
goto badframe;
- if (__copy_from_user((void *)&st, &frame->uc.uc_stack, sizeof(st)))
- goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack(&st, NULL, regs->r1);
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1))
+ goto badframe;
return rval;
@@ -251,7 +249,7 @@ badframe:
*/
static int
-setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
@@ -278,7 +276,7 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
/*
* Determine which stack to use..
*/
-static inline void *
+static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
/* Default to using normal stack */
@@ -287,87 +285,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
- return (void *)((sp - frame_size) & -8UL);
-}
-
-static void setup_frame(int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs *regs)
-{
- struct sigframe *frame;
- int err = 0;
- int signal;
-
- frame = get_sigframe(ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
- err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
-
- if (_NSIG_WORDS > 1) {
- err |= __copy_to_user(frame->extramask, &set->sig[1],
- sizeof(frame->extramask));
- }
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- /* minus 8 is offset to cater for "rtsd r15,8" offset */
- if (ka->sa.sa_flags & SA_RESTORER) {
- regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
- } else {
- /* Note, these encodings are _big endian_! */
-
- /* addi r12, r0, __NR_sigreturn */
- err |= __put_user(0x31800000 | __NR_sigreturn ,
- frame->tramp + 0);
- /* brki r14, 0x8 */
- err |= __put_user(0xb9cc0008, frame->tramp + 1);
-
- /* Return from sighandler will jump to the tramp.
- Negative 8 offset because return is rtsd r15, 8 */
- regs->r15 = ((unsigned long)frame->tramp)-8;
-
- __invalidate_cache_sigtramp((unsigned long)frame->tramp);
- }
-
- if (err)
- goto give_sigsegv;
-
- /* Set up registers for signal handler */
- regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE;
-
- /* Signal handler args: */
- regs->r5 = signal; /* Arg 0: signum */
- regs->r6 = (unsigned long) &frame->sc; /* arg 1: sigcontext */
-
- /* Offset of 4 to handle microblaze rtid r14, 0 */
- regs->pc = (unsigned long)ka->sa.sa_handler;
-
- set_fs(USER_DS);
-
-#ifdef DEBUG_SIG
- printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
- current->comm, current->pid, frame, regs->pc);
-#endif
-
- return;
-
-give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
- force_sig(SIGSEGV, current);
+ return (void __user *)((sp - frame_size) & -8UL);
}
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
- struct rt_sigframe *frame;
+ struct rt_sigframe __user *frame;
int err = 0;
int signal;
@@ -382,7 +306,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
- err |= copy_siginfo_to_user(&frame->info, info);
+ if (info)
+ err |= copy_siginfo_to_user(&frame->info, info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
@@ -463,7 +388,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
case -ERESTARTNOINTR:
do_restart:
/* offset of 4 bytes to re-execute trap (brki) instruction */
+#ifndef CONFIG_MMU
regs->pc -= 4;
+#else
+ /* offset of 8 bytes required: 4 for the rtbd offset, plus 4
+ * for the size of the "brki r14,8" instruction. */
+ regs->pc -= 8;
+#endif
break;
}
}
@@ -480,7 +413,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame(sig, ka, info, oldset, regs);
else
- setup_frame(sig, ka, oldset, regs);
+ setup_rt_frame(sig, ka, NULL, oldset, regs);
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 3bb42ec924c..376d1789f7c 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -2,7 +2,11 @@ ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call,
* used for restarting */
.long sys_exit
- .long sys_ni_syscall /* was fork */
+#ifdef CONFIG_MMU
+ .long sys_fork_wrapper
+#else
+ .long sys_ni_syscall
+#endif
.long sys_read
.long sys_write
.long sys_open /* 5 */
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 293ef486013..eaaaf805f31 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,14 +22,6 @@ void trap_init(void)
__enable_hw_exceptions();
}
-void __bad_xchg(volatile void *ptr, int size)
-{
- printk(KERN_INFO "xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
- __builtin_return_address(0), ptr, size);
- BUG();
-}
-EXPORT_SYMBOL(__bad_xchg);
-
static int kstack_depth_to_print = 24;
static int __init kstack_setup(char *s)
@@ -105,3 +97,37 @@ void dump_stack(void)
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_MMU
+void __bug(const char *file, int line, void *data)
+{
+ if (data)
+ printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n",
+ file, line, data);
+ else
+ printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
+
+ machine_halt();
+}
+
+int bad_trap(int trap_num, struct pt_regs *regs)
+{
+ printk(KERN_CRIT
+ "unimplemented trap %d called at 0x%08lx, pid %d!\n",
+ trap_num, regs->pc, current->pid);
+ return -ENOSYS;
+}
+
+int debug_trap(struct pt_regs *regs)
+{
+ int i;
+ printk(KERN_CRIT "debug trap\n");
+ for (i = 0; i < 32; i++) {
+ /* printk("r%i:%08X\t",i,regs->gpr[i]); */
+ if ((i % 4) == 3)
+ printk(KERN_CRIT "\n");
+ }
+ printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr);
+ return -ENOSYS;
+}
+#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 840385e5129..8ae807ab7a5 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -17,8 +17,7 @@ ENTRY(_start)
jiffies = jiffies_64 + 4;
SECTIONS {
- . = CONFIG_KERNEL_BASE_ADDR;
-
+ . = CONFIG_KERNEL_START;
.text : {
_text = . ;
_stext = . ;
@@ -132,6 +131,8 @@ SECTIONS {
__con_initcall_end = .;
}
+ SECURITY_INIT
+
__init_end_before_initramfs = .;
.init.ramfs ALIGN(4096) : {