| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-23 10:59:07 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-23 10:59:07 -0700 |
| commit | ec0d7f18ab7b5097d7c0c8f3d909ca1031b9d5cd (patch) | |
| tree | 7d62c924592145f819ecaa5d60460a05a10dfdbd /arch/x86/kernel/process.c | |
| parent | 269af9a1a08d368b46d72e74126564d04c354f7e (diff) | |
| parent | 1dcc8d7ba235a316a056f993e88f0d18b92c60d9 (diff) | |
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull fpu state cleanups from Ingo Molnar:
"This tree streamlines further aspects of FPU handling by eliminating
the prepare_to_copy() complication and moving that logic to
arch_dup_task_struct().
It also fixes the FPU dumps in threaded core dumps, removes an old
(and now invalid) assumption, plus micro-optimizes the exit path by
avoiding an FPU save for dead tasks."
Fixed up trivial add-add conflict in arch/sh/kernel/process.c that came
in because we now do the FPU handling in arch_dup_task_struct() rather
than the legacy (and now gone) prepare_to_copy().
* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, fpu: drop the fpu state during thread exit
x86, xsave: remove thread_has_fpu() bug check in __sanitize_i387_state()
coredump: ensure the fpu state is flushed for proper multi-threaded core dump
fork: move the real prepare_to_copy() users to arch_dup_task_struct()
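
As a minimal user-space sketch of the ordering the series establishes in arch_dup_task_struct(): flush the parent's lazily held FPU state to memory first, then copy the task and duplicate the state for the child. The struct layout and the names flush_lazy_state() and dup_task() below are invented for illustration only; they are not kernel APIs.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the FPU/xsave area that the kernel allocates lazily. */
struct xstate {
	double regs[8];
};

/* Much-simplified task model; has_fpu_state plays the role that
 * fpu_allocated() plays in the diff below. */
struct task {
	int has_fpu_state;
	struct xstate *xstate;
};

/* Analogue of unlazy_fpu(): make sure the in-memory copy of src's FPU
 * state is current before anyone copies it. */
static void flush_lazy_state(struct task *t)
{
	/* In the kernel this would save live FPU registers into t->xstate;
	 * in this model the state is already in memory, so do nothing. */
	(void)t;
}

/* Mirrors the shape of the new arch_dup_task_struct(): flush lazy state
 * first, then copy the task, then duplicate the FPU area for the child. */
static int dup_task(struct task *dst, struct task *src)
{
	flush_lazy_state(src);
	*dst = *src;

	if (src->has_fpu_state) {
		dst->xstate = malloc(sizeof(*dst->xstate));
		if (!dst->xstate)
			return -1;
		memcpy(dst->xstate, src->xstate, sizeof(*dst->xstate));
	}
	return 0;
}

int main(void)
{
	struct xstate xs = { { 3.14 } };
	struct task parent = { 1, &xs };
	struct task child;

	if (dup_task(&child, &parent) == 0) {
		printf("child reg[0] = %g\n", child.xstate->regs[0]);
		free(child.xstate);
	}
	return 0;
}
```

This is the same ordering the x86 diff below enforces, with arch_dup_task_struct() taking over the work that the now-removed prepare_to_copy() hook used to do.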
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r-- | arch/x86/kernel/process.c | 25 |
1 files changed, 19 insertions, 6 deletions
```diff
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index f2e8c0f951d..735279e54e5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -56,10 +56,16 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
+/*
+ * this gets called so that we can store lazy state into memory and copy the
+ * current task into the new thread.
+ */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	int ret;
 
+	unlazy_fpu(src);
+
 	*dst = *src;
 	if (fpu_allocated(&src->thread.fpu)) {
 		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
@@ -89,6 +95,16 @@ void arch_task_cache_init(void)
 				  SLAB_PANIC | SLAB_NOTRACK, NULL);
 }
 
+static inline void drop_fpu(struct task_struct *tsk)
+{
+	/*
+	 * Forget coprocessor state..
+	 */
+	tsk->fpu_counter = 0;
+	clear_fpu(tsk);
+	clear_used_math();
+}
+
 /*
  * Free current thread data structures etc..
  */
@@ -111,6 +127,8 @@ void exit_thread(void)
 		put_cpu();
 		kfree(bp);
 	}
+
+	drop_fpu(me);
 }
 
 void show_regs_common(void)
@@ -145,12 +163,7 @@ void flush_thread(void)
 
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-	/*
-	 * Forget coprocessor state..
-	 */
-	tsk->fpu_counter = 0;
-	clear_fpu(tsk);
-	clear_used_math();
+	drop_fpu(tsk);
 }
 
 static void hard_disable_TSC(void)
```
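
As a footnote on the exit-path change above: a dying task never runs again, so drop_fpu() simply forgets its coprocessor state rather than saving it. A rough stand-alone model of that idea follows; the field and function names are illustrative only, not the kernel's.

```c
#include <stdbool.h>
#include <stdio.h>

/* Much-simplified task model; the fields mirror what drop_fpu() touches
 * in the diff above (fpu_counter, the "owns the FPU" flag, used_math). */
struct task {
	int fpu_counter;
	bool owns_fpu;
	bool used_math;
};

/* Analogue of drop_fpu(): forget coprocessor state without saving it. */
static void drop_fpu_state(struct task *t)
{
	t->fpu_counter = 0;	/* no longer a candidate for eager restore */
	t->owns_fpu = false;	/* like clear_fpu(): release, do not save */
	t->used_math = false;	/* like clear_used_math() */
}

/* Exit path: saving the registers of a task that will never run again
 * is wasted work; dropping the state is enough. */
static void exit_thread_model(struct task *t)
{
	drop_fpu_state(t);
}

int main(void)
{
	struct task t = { .fpu_counter = 5, .owns_fpu = true, .used_math = true };

	exit_thread_model(&t);
	printf("owns_fpu=%d used_math=%d fpu_counter=%d\n",
	       t.owns_fpu, t.used_math, t.fpu_counter);
	return 0;
}
```

The same helper also replaces the open-coded "forget coprocessor state" block in flush_thread(), which is what the last hunk of the diff consolidates.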