Diffstat (limited to 'arch/mips/kernel/entry.S')
-rw-r--r--  arch/mips/kernel/entry.S | 54
1 file changed, 23 insertions, 31 deletions
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 5eb429137e0..83c87fe4ee4 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -19,11 +19,11 @@
#include <asm/war.h>
#ifdef CONFIG_PREEMPT
- .macro preempt_stop reg=t0
+ .macro preempt_stop
.endm
#else
- .macro preempt_stop reg=t0
- local_irq_disable \reg
+ .macro preempt_stop
+ local_irq_disable
.endm
#define resume_kernel restore_all
#endif
@@ -37,17 +37,18 @@ FEXPORT(ret_from_irq)
andi t0, t0, KU_USER
beqz t0, resume_kernel
-FEXPORT(resume_userspace)
- local_irq_disable t0 # make sure we dont miss an
+resume_userspace:
+ local_irq_disable # make sure we dont miss an
# interrupt setting need_resched
# between sampling and return
LONG_L a2, TI_FLAGS($28) # current->work
- andi a2, _TIF_WORK_MASK # (ignoring syscall_trace)
- bnez a2, work_pending
+ andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
+ bnez t0, work_pending
j restore_all
#ifdef CONFIG_PREEMPT
-ENTRY(resume_kernel)
+resume_kernel:
+ local_irq_disable
lw t0, TI_PRE_COUNT($28)
bnez t0, restore_all
need_resched:
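
This hunk keeps the freshly sampled TI_FLAGS value live in a2 and uses t0 only as a scratch register for the mask test, so the later work_pending and syscall_exit_work paths can reuse a2 without reloading it. A hedged C-level sketch of the control flow, with illustrative constants and stubs rather than real kernel interfaces:

    #include <stdio.h>

    #define _TIF_WORK_MASK    0x0000ffefu  /* illustrative value, not the real mask */
    #define _TIF_NEED_RESCHED 0x00000004u  /* illustrative value */

    /* Stub standing in for LONG_L a2, TI_FLAGS($28). */
    static unsigned int read_thread_flags(void)
    {
        return _TIF_NEED_RESCHED;          /* pretend rescheduling work is pending */
    }

    int main(void)
    {
        unsigned int a2 = read_thread_flags();  /* a2 stays loaded with TI_FLAGS */
        unsigned int t0 = a2 & _TIF_WORK_MASK;  /* andi t0, a2, _TIF_WORK_MASK   */

        if (t0)                                 /* bnez t0, work_pending         */
            printf("work_pending, flags %#x\n", a2);
        else
            puts("restore_all");                /* j restore_all                 */
        return 0;
    }
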
@@ -57,12 +58,7 @@ need_resched:
LONG_L t0, PT_STATUS(sp) # Interrupts off?
andi t0, 1
beqz t0, restore_all
- li t0, PREEMPT_ACTIVE
- sw t0, TI_PRE_COUNT($28)
- local_irq_enable t0
- jal schedule
- sw zero, TI_PRE_COUNT($28)
- local_irq_disable t0
+ jal preempt_schedule_irq
b need_resched
#endif
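
The preemption path above drops the open-coded PREEMPT_ACTIVE / schedule() sequence in favour of a single jal preempt_schedule_irq, which is entered with interrupts disabled. A hedged C sketch of what the deleted assembly used to spell out by hand, with stand-in stubs so it builds outside the kernel:

    #include <stdio.h>

    #define PREEMPT_ACTIVE 0x10000000      /* illustrative constant */

    static int preempt_count;              /* stands in for TI_PRE_COUNT($28) */

    static void local_irq_enable(void)  { puts("irqs on");    }
    static void local_irq_disable(void) { puts("irqs off");   }
    static void schedule(void)          { puts("schedule()"); }

    /* The sequence the removed lines performed around schedule() ... */
    static void old_open_coded_preempt(void)
    {
        preempt_count = PREEMPT_ACTIVE;    /* sw t0, TI_PRE_COUNT($28)   */
        local_irq_enable();                /* local_irq_enable t0        */
        schedule();                        /* jal schedule               */
        preempt_count = 0;                 /* sw zero, TI_PRE_COUNT($28) */
        local_irq_disable();               /* local_irq_disable t0       */
    }

    int main(void)
    {
        /* ... which preempt_schedule_irq now performs in one call. */
        old_open_coded_preempt();
        return 0;
    }
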
@@ -88,13 +84,13 @@ FEXPORT(restore_partial) # restore partial frame
RESTORE_SP_AND_RET
.set at
-FEXPORT(work_pending)
- andi t0, a2, _TIF_NEED_RESCHED
+work_pending:
+ andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
jal schedule
- local_irq_disable t0 # make sure need_resched and
+ local_irq_disable # make sure need_resched and
# signals dont change between
# sampling and return
LONG_L a2, TI_FLAGS($28)
@@ -109,15 +105,14 @@ work_notifysig: # deal with pending signals and
move a0, sp
li a1, 0
jal do_notify_resume # a2 already loaded
- j restore_all
+ j resume_userspace
FEXPORT(syscall_exit_work_partial)
SAVE_STATIC
-FEXPORT(syscall_exit_work)
- LONG_L t0, TI_FLAGS($28)
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
- and t0, t1
- beqz t0, work_pending # trace bit is set
+syscall_exit_work:
+ li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ and t0, a2 # a2 is preloaded with TI_FLAGS
+ beqz t0, work_pending # trace bit set?
local_irq_enable # could let do_syscall_trace()
# call schedule() instead
move a0, sp
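
Two related changes in this hunk: work_notifysig now jumps back to resume_userspace instead of restore_all, so any work flags raised while delivering signals are re-checked with interrupts disabled, and syscall_exit_work tests the preloaded a2 directly against the trace/audit mask. A hedged, simplified C model of the re-check loop (stub names, not kernel code):

    #include <stdio.h>

    #define _TIF_WORK_MASK 0x0000ffefu     /* illustrative value */

    static int signals_queued = 1;         /* pretend one signal is pending */

    /* Stub for LONG_L a2, TI_FLAGS($28). */
    static unsigned int read_thread_flags(void)
    {
        return signals_queued ? _TIF_WORK_MASK : 0;
    }

    static void do_notify_resume(void)     /* stub: deliver the signal */
    {
        puts("do_notify_resume()");
        signals_queued = 0;
    }

    int main(void)
    {
        /* resume_userspace: keep re-sampling the flags until no work is
         * left, then fall through to restore_all. */
        for (;;) {
            unsigned int a2 = read_thread_flags();
            if (!(a2 & _TIF_WORK_MASK))    /* j restore_all              */
                break;
            do_notify_resume();            /* work_notifysig path        */
        }                                  /* ... then j resume_userspace */
        puts("restore_all");
        return 0;
    }
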
@@ -128,28 +123,25 @@ FEXPORT(syscall_exit_work)
/*
* Common spurious interrupt handler.
*/
- .text
- .align 5
LEAF(spurious_interrupt)
/*
* Someone tried to fool us by sending an interrupt but we
* couldn't find a cause for it.
*/
+ PTR_LA t1, irq_err_count
#ifdef CONFIG_SMP
- lui t1, %hi(irq_err_count)
-1: ll t0, %lo(irq_err_count)(t1)
+1: ll t0, (t1)
addiu t0, 1
- sc t0, %lo(irq_err_count)(t1)
+ sc t0, (t1)
#if R10000_LLSC_WAR
beqzl t0, 1b
#else
beqz t0, 1b
#endif
#else
- lui t1, %hi(irq_err_count)
- lw t0, %lo(irq_err_count)(t1)
+ lw t0, (t1)
addiu t0, 1
- sw t0, %lo(irq_err_count)(t1)
+ sw t0, (t1)
#endif
j ret_from_irq
END(spurious_interrupt)
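
In the spurious-interrupt handler the address of irq_err_count is now formed once with PTR_LA and used with plain (t1) addressing; on SMP the ll/sc pair keeps retrying until the store-conditional succeeds, while UP gets by with a simple load/add/store. A hedged C model of the two variants, using a C11 atomic as a stand-in for the hand-rolled ll/sc loop:

    #include <stdatomic.h>
    #include <stdio.h>

    #define CONFIG_SMP 1                       /* illustrative: pick the SMP variant */

    static _Atomic unsigned int irq_err_count; /* stand-in for the kernel counter */

    static void spurious_interrupt_model(void)
    {
    #if CONFIG_SMP
        /* SMP: ll/sc retries the increment until sc succeeds; a C11
         * atomic add has the same effect in this model. */
        atomic_fetch_add_explicit(&irq_err_count, 1u, memory_order_relaxed);
    #else
        /* UP: a plain load / add / store is enough. */
        unsigned int t0 = atomic_load_explicit(&irq_err_count, memory_order_relaxed);
        atomic_store_explicit(&irq_err_count, t0 + 1, memory_order_relaxed);
    #endif
    }

    int main(void)
    {
        spurious_interrupt_model();
        printf("irq_err_count = %u\n",
               atomic_load_explicit(&irq_err_count, memory_order_relaxed));
        return 0;
    }
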