-rw-r--r--  arch/sh/include/asm/fpu.h    |  3 +++
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 16 ++++++++++++----
-rw-r--r--  arch/sh/kernel/process_32.c  | 16 ++++++++++++++++
3 files changed, 31 insertions, 4 deletions
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 1d3aee04b5c..bfd78e19de1 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -19,6 +19,7 @@ static inline void grab_fpu(struct pt_regs *regs)
 struct task_struct;
 
 extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs);
+void fpu_state_restore(struct pt_regs *regs);
 #else
 
 #define release_fpu(regs)	do { } while (0)
@@ -44,6 +45,8 @@ static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 	preempt_disable();
 	if (test_tsk_thread_flag(tsk, TIF_USEDFPU))
 		save_fpu(tsk, regs);
+	else
+		tsk->fpu_counter = 0;
 	preempt_enable();
 }
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e3ea5411da6..d79226fa59d 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -483,18 +483,18 @@ BUILD_TRAP_HANDLER(fpu_error)
 	force_sig(SIGFPE, tsk);
 }
 
-BUILD_TRAP_HANDLER(fpu_state_restore)
+void fpu_state_restore(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	TRAP_HANDLER_DECL;
 
 	grab_fpu(regs);
-	if (!user_mode(regs)) {
+	if (unlikely(!user_mode(regs))) {
 		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
 		return;
 	}
 
-	if (used_math()) {
+	if (likely(used_math())) {
 		/* Using the FPU again.  */
 		restore_fpu(tsk);
 	} else {
@@ -503,4 +503,12 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
 		set_used_math();
 	}
 	set_tsk_thread_flag(tsk, TIF_USEDFPU);
+	tsk->fpu_counter++;
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
 }
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be..aff5fe02e39 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -288,8 +288,14 @@ static void ubc_set_tracing(int asid, unsigned long pc)
 __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev, struct task_struct *next)
 {
+	struct thread_struct *next_t = &next->thread;
+
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next->fpu_counter > 5)
+		prefetch(&next_t->fpu.hard);
 #endif
 
 #ifdef CONFIG_MMU
@@ -321,6 +327,16 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 #endif
 	}
 
+#if defined(CONFIG_SH_FPU)
+	/* If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next->fpu_counter > 5) {
+		fpu_state_restore(task_pt_regs(next));
+	}
+#endif
+
 	return prev;
 }
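
The heuristic this patch implements is small enough to show in isolation. The sketch below is a hypothetical userspace model, not kernel code: struct task, fpu_trap(), and context_switch() are stand-ins for task_struct, the fpu_state_restore trap handler, and __switch_to(); only the fpu_counter bookkeeping mirrors the diff above.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the few task_struct fields the patch touches. */
struct task {
	const char *name;
	bool used_fpu;			/* models the TIF_USEDFPU thread flag */
	unsigned char fpu_counter;	/* consecutive FPU-using timeslices */
};

/* Models fpu_state_restore(): reload the registers, mark the FPU in use,
 * and extend the task's streak of FPU-using timeslices. */
static void fpu_state_restore(struct task *t)
{
	printf("  restore FPU state for %s\n", t->name);
	t->used_fpu = true;		/* set_tsk_thread_flag(TIF_USEDFPU) */
	t->fpu_counter++;
}

/* Models the fpu_state_restore trap: taken on the first FPU instruction
 * a task executes after the FPU was disabled at switch time. */
static void fpu_trap(struct task *t)
{
	printf("  trap: first FPU use by %s this slice\n", t->name);
	fpu_state_restore(t);
}

/* Models the unlazy_fpu() change plus the second __switch_to() hunk. */
static void context_switch(struct task *prev, struct task *next)
{
	/* unlazy_fpu(): a timeslice with no FPU use resets the streak. */
	if (!prev->used_fpu)
		prev->fpu_counter = 0;
	prev->used_fpu = false;		/* FPU disabled for the next slice */

	/* The payoff: a task that used the FPU in more than 5 consecutive
	 * slices gets its state restored eagerly, skipping the trap. */
	if (next->fpu_counter > 5)
		fpu_state_restore(next);
}

int main(void)
{
	struct task math  = { .name = "math" };
	struct task shell = { .name = "shell" };

	for (int slice = 0; slice < 8; slice++) {
		printf("slice %d:\n", slice);
		context_switch(&shell, &math);
		if (!math.used_fpu)	/* math touches the FPU every slice */
			fpu_trap(&math);
		context_switch(&math, &shell);	/* shell never uses the FPU */
	}
	return 0;
}
```

Running the sketch, slices 0 through 5 each take the trap while the streak builds; from slice 6 onward fpu_counter exceeds 5 and the switch path restores eagerly, so the trap disappears. That is the same trade the patch makes with fpu_state_restore(task_pt_regs(next)): pay for the restore up front when recent history says the task will almost certainly touch the FPU anyway.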