author    | Stuart Menefy <stuart.menefy@st.com> | 2009-09-25 18:25:10 +0100
committer | Paul Mundt <lethal@linux-sh.org>     | 2009-11-24 17:45:38 +0900
commit    | d3ea9fa0a563620fe9f416f94bb8927c64390917 (patch)
tree      | 0aa1278ac7929f936fc4fd8daf235930f6164d18 /arch/sh/kernel
parent    | 39ac11c1607f1d566e7cf885acd403fa4f07f8a2 (diff)
sh: Minor optimisations to FPU handling
A number of small optimisations to FPU handling, in particular:
- move the task USEDFPU flag from the thread_info flags field (which
is accessed asynchronously to the thread) to a new status field,
which is only accessed by the thread itself. This allows the locking
to be removed in most cases, or reduced to a preempt_lock().
This mimics the i386 behaviour (see the first sketch below).
- move the modification of regs->sr and thread_info->status flags out
of save_fpu() to __unlazy_fpu(). This gives the compiler a better
chance to optimise things, as well as making save_fpu() symmetrical
with restore_fpu() and init_fpu().
- implement prepare_to_copy(), so that when creating a thread, we can
unlazy the FPU prior to copying the thread data structures.
Also make sure that the FPU is disabled while in the kernel, in
particular while booting, and for newly created kernel threads
(see the second sketch below).
In a very artificial benchmark, the execution time for 2500000
context switches was reduced from 50 to 45 seconds.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
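
The first two bullets above come down to one pattern: TS_USEDFPU now lives in thread_info->status, which only the owning thread writes, and __unlazy_fpu() is the single place that clears it and updates regs->sr. The header that defines __unlazy_fpu() is outside this diffstat, so the following is only a sketch of what the description implies, not code from the patch:

/* Sketch only: an __unlazy_fpu() along the lines the commit message
 * describes.  TS_USEDFPU, save_fpu() and release_fpu() appear in the
 * hunks below; the exact body of the real helper may differ. */
static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* Only the owner writes ->status, so no atomic bitops or
		 * locks are needed; callers just keep preemption off. */
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		save_fpu(tsk);		/* new one-argument form */
		release_fpu(regs);	/* set SR.FD so the next FPU use traps */
	}
}

Compare this with the old arrangement, where save_fpu() itself cleared the TIF_USEDFPU flag with clear_tsk_thread_flag() (an atomic bit operation) and called release_fpu().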
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r-- | arch/sh/kernel/cpu/init.c     |  4
-rw-r--r-- | arch/sh/kernel/cpu/sh2a/fpu.c | 11
-rw-r--r-- | arch/sh/kernel/cpu/sh4/fpu.c  | 12
-rw-r--r-- | arch/sh/kernel/process_32.c   | 24
4 files changed, 26 insertions, 25 deletions
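
The third bullet and the kernel-thread note map onto arch/sh/kernel/process_32.c below. Condensed, and omitting the surrounding code that the patch leaves alone:

/* Condensed from the process_32.c hunks below; not a standalone file. */

/* Called by the generic fork code before the new task is allocated and
 * the current one copied into it: flush tsk's live FPU registers into
 * tsk->thread.fpu so the child inherits an up-to-date copy. */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk, task_pt_regs(tsk));
}

kernel_thread() now builds the new thread's SR as SR_MD (privileged mode), plus SR_FD when CONFIG_SH_FPU is set, instead of the bare magic constant (1 << 30), so kernel threads run with the FPU disabled; and copy_thread() clears TS_USEDFPU and resets fpu_counter for kernel-side children.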
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 580d58b94cc..ad9dfff9427 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -311,12 +311,12 @@ asmlinkage void __init sh_cpu_init(void)
 	if (fpu_disabled) {
 		printk("FPU Disabled\n");
 		current_cpu_data.flags &= ~CPU_HAS_FPU;
-		disable_fpu();
 	}
 
 	/* FPU initialization */
+	disable_fpu();
 	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
-		clear_thread_flag(TIF_USEDFPU);
+		current_thread_info()->status &= ~TS_USEDFPU;
 		clear_used_math();
 	}
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 6df2fb98eb3..13817ee49d5 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -25,14 +25,12 @@
 /*
  * Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
  */
 void
-save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
-	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
 	enable_fpu();
 	asm volatile("sts.l fpul, @-%0\n\t"
 		     "sts.l fpscr, @-%0\n\t"
@@ -60,7 +58,6 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		     : "memory");
 
 	disable_fpu();
-	release_fpu(regs);
 }
 
 static void
@@ -598,13 +595,13 @@ BUILD_TRAP_HANDLER(fpu_error)
 	struct task_struct *tsk = current;
 	TRAP_HANDLER_DECL;
 
-	save_fpu(tsk, regs);
+	__unlazy_fpu(tsk, regs);
 	if (ieee_fpe_handler(regs)) {
 		tsk->thread.fpu.hard.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 		grab_fpu(regs);
 		restore_fpu(tsk);
-		set_tsk_thread_flag(tsk, TIF_USEDFPU);
+		task_thread_info(tsk)->status |= TS_USEDFPU;
 		return;
 	}
 
@@ -630,5 +627,5 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
 		fpu_init();
 		set_used_math();
 	}
-	set_tsk_thread_flag(tsk, TIF_USEDFPU);
+	task_thread_info(tsk)->status |= TS_USEDFPU;
 }
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index d79226fa59d..e97857aec8a 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags;
 
 /*
  * Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
  */
-void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
-	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
 	enable_fpu();
 	asm volatile ("sts.l fpul, @-%0\n\t"
 		      "sts.l fpscr, @-%0\n\t"
@@ -92,7 +90,6 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		      :"memory");
 
 	disable_fpu();
-	release_fpu(regs);
 }
 
 static void restore_fpu(struct task_struct *tsk)
@@ -285,7 +282,6 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 			/* fcnvsd */
 			struct task_struct *tsk = current;
 
-			save_fpu(tsk, regs);
 			if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
 				/* FPU error */
 				denormal_to_double(&tsk->thread.fpu.hard,
@@ -462,7 +458,7 @@ BUILD_TRAP_HANDLER(fpu_error)
 	struct task_struct *tsk = current;
 	TRAP_HANDLER_DECL;
 
-	save_fpu(tsk, regs);
+	__unlazy_fpu(tsk, regs);
 	fpu_exception_flags = 0;
 	if (ieee_fpe_handler(regs)) {
 		tsk->thread.fpu.hard.fpscr &=
@@ -473,7 +469,7 @@ BUILD_TRAP_HANDLER(fpu_error)
 		tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
 		grab_fpu(regs);
 		restore_fpu(tsk);
-		set_tsk_thread_flag(tsk, TIF_USEDFPU);
+		task_thread_info(tsk)->status |= TS_USEDFPU;
 		if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
 		     (fpu_exception_flags >> 2)) == 0) {
 			return;
@@ -502,7 +498,7 @@ void fpu_state_restore(struct pt_regs *regs)
 		fpu_init();
 		set_used_math();
 	}
-	set_tsk_thread_flag(tsk, TIF_USEDFPU);
+	task_thread_info(tsk)->status |= TS_USEDFPU;
 	tsk->fpu_counter++;
 }
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 7733f5fa6bb..d721f9297c0 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -134,7 +134,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	regs.regs[5] = (unsigned long)fn;
 
 	regs.pc = (unsigned long)kernel_thread_helper;
-	regs.sr = (1 << 30);
+	regs.sr = SR_MD;
+#if defined(CONFIG_SH_FPU)
+	regs.sr |= SR_FD;
+#endif
 
 	/* Ok, create the new process.. */
 	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
@@ -189,6 +192,15 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 }
 EXPORT_SYMBOL(dump_fpu);
 
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+	unlazy_fpu(tsk, task_pt_regs(tsk));
+}
+
 asmlinkage void ret_from_fork(void);
 
 int copy_thread(unsigned long clone_flags, unsigned long usp,
@@ -197,16 +209,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
-#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
+#if defined(CONFIG_SH_DSP)
 	struct task_struct *tsk = current;
 #endif
 
-#if defined(CONFIG_SH_FPU)
-	unlazy_fpu(tsk, regs);
-	p->thread.fpu = tsk->thread.fpu;
-	copy_to_stopped_child_used_math(p);
-#endif
-
 #if defined(CONFIG_SH_DSP)
 	if (is_dsp_enabled(tsk)) {
 		/* We can use the __save_dsp or just copy the struct:
@@ -226,6 +232,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	} else {
 		childregs->regs[15] = (unsigned long)childregs;
 		ti->addr_limit = KERNEL_DS;
+		ti->status &= ~TS_USEDFPU;
+		p->fpu_counter = 0;
 	}
 
 	if (clone_flags & CLONE_SETTLS)
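
Taken together, the trap-handler hunks show the other half of the lazy-FPU life cycle: the first FPU instruction after release_fpu() traps, and the handler reloads or initialises the state and sets TS_USEDFPU again. Only the tail of fpu_state_restore() is visible above, so the following is an assumed reconstruction of its shape, not text from the patch:

/* Sketch only: roughly what the SH-4 fpu_state_restore() does after
 * this patch.  The names used here all appear in the hunks above; the
 * used_math() test and the exact ordering are assumptions. */
void fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	grab_fpu(regs);			/* clear SR.FD: FPU instructions work again */
	if (likely(used_math())) {
		restore_fpu(tsk);	/* reload tsk->thread.fpu.hard */
	} else {
		fpu_init();		/* first FPU use ever: start from clean state */
		set_used_math();
	}
	task_thread_info(tsk)->status |= TS_USEDFPU;	/* owner-only write */
	tsk->fpu_counter++;
}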