Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--	arch/arm/vfp/vfphw.S	63
-rw-r--r--	arch/arm/vfp/vfpmodule.c	127
2 files changed, 114 insertions, 76 deletions
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 9897dcfc16d..2d30c7f6edd 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -77,27 +77,27 @@ ENTRY(vfp_support_entry)
 	bne	look_for_VFP_exceptions	@ VFP is already enabled
 
 	DBGSTR1	"enable %x", r10
-	ldr	r3, last_VFP_context_address
+	ldr	r3, vfp_current_hw_state_address
 	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
-	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
+	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
 	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
-	cmp	r4, r10
-	beq	check_for_exception	@ we are returning to the same
-					@ process, so the registers are
-					@ still there. In this case, we do
-					@ not want to drop a pending exception.
+	cmp	r4, r10			@ this thread owns the hw context?
+#ifndef CONFIG_SMP
+	@ For UP, checking that this thread owns the hw context is
+	@ sufficient to determine that the hardware state is valid.
+	beq	vfp_hw_state_valid
+
+	@ On UP, we lazily save the VFP context. As a different
+	@ thread wants ownership of the VFP hardware, save the old
+	@ state if there was a previous (valid) owner.
 
 	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
 					@ exceptions, so we can get at the
 					@ rest of it
 
-#ifndef CONFIG_SMP
-	@ Save out the current registers to the old thread state
-	@ No need for SMP since this is not done lazily
-
 	DBGSTR1	"save old state %p", r4
-	cmp	r4, #0
-	beq	no_old_VFP_process
+	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
+	beq	vfp_reload_hw		@ then the hw state needs reloading
 	VFPFSTMIA r4, r5		@ save the working registers
 	VFPFMRX	r5, FPSCR		@ current status
 #ifndef CONFIG_CPU_FEROCEON
@@ -110,13 +110,35 @@ ENTRY(vfp_support_entry)
 1:
 #endif
 	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
-					@ and point r4 at the word at the
-					@ start of the register dump
+vfp_reload_hw:
+
+#else
+	@ For SMP, if this thread does not own the hw context, then we
+	@ need to reload it. No need to save the old state as on SMP,
+	@ we always save the state when we switch away from a thread.
+	bne	vfp_reload_hw
+
+	@ This thread has ownership of the current hardware context.
+	@ However, it may have been migrated to another CPU, in which
+	@ case the saved state is newer than the hardware context.
+	@ Check this by looking at the CPU number which the state was
+	@ last loaded onto.
+	ldr	ip, [r10, #VFP_CPU]
+	teq	ip, r11
+	beq	vfp_hw_state_valid
+
+vfp_reload_hw:
+	@ We're loading this threads state into the VFP hardware. Update
+	@ the CPU number which contains the most up to date VFP context.
+	str	r11, [r10, #VFP_CPU]
+
+	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
+					@ exceptions, so we can get at the
+					@ rest of it
 #endif
 
-no_old_VFP_process:
 	DBGSTR1	"load state %p", r10
-	str	r10, [r3, r11, lsl #2]	@ update the last_VFP_context pointer
+	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
 					@ Load the saved state back into the VFP
 	VFPFLDMIA r10, r5		@ reload the working registers while
 					@ FPEXC is in a safe state
@@ -132,7 +154,8 @@ no_old_VFP_process:
 #endif
 	VFPFMXR	FPSCR, r5		@ restore status
 
-check_for_exception:
+@ The context stored in the VFP hardware is up to date with this thread
+vfp_hw_state_valid:
 	tst	r1, #FPEXC_EX
 	bne	process_exception	@ might as well handle the pending
 					@ exception before retrying branch
@@ -207,8 +230,8 @@ ENTRY(vfp_save_state)
 ENDPROC(vfp_save_state)
 
 	.align
-last_VFP_context_address:
-	.word	last_VFP_context
+vfp_current_hw_state_address:
+	.word	vfp_current_hw_state
 
 	.macro	tbl_branch, base, tmp, shift
 #ifdef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 650d90be0f9..79bcb431693 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -33,7 +33,6 @@ void vfp_support_entry(void);
 void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -43,6 +42,46 @@ union vfp_state *last_VFP_context[NR_CPUS];
 unsigned int VFP_arch;
 
 /*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we also need to check the CPU number stored in the
+ * saved state too to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is 'thread's most up to date state stored in this CPUs hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+	if (thread->vfpstate.hard.cpu != cpu)
+		return false;
+#endif
+	return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
+
+/*
+ * Force a reload of the VFP context from the thread structure. We do
+ * this by ensuring that access to the VFP hardware is disabled, and
+ * clear last_VFP_context. Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+	if (vfp_state_in_hw(cpu, thread)) {
+		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+		vfp_current_hw_state[cpu] = NULL;
+	}
+#ifdef CONFIG_SMP
+	thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
  * Per-thread VFP initialization.
  */
 static void vfp_thread_flush(struct thread_info *thread)
@@ -50,21 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread)
 	union vfp_state *vfp = &thread->vfpstate;
 	unsigned int cpu;
 
-	memset(vfp, 0, sizeof(union vfp_state));
-
-	vfp->hard.fpexc = FPEXC_EN;
-	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
 	/*
 	 * Disable VFP to ensure we initialize it first. We must ensure
-	 * that the modification of last_VFP_context[] and hardware disable
-	 * are done for the same CPU and without preemption.
+	 * that the modification of vfp_current_hw_state[] and hardware
+	 * disable are done for the same CPU and without preemption.
+	 *
+	 * Do this first to ensure that preemption won't overwrite our
+	 * state saving should access to the VFP be enabled at this point.
 	 */
 	cpu = get_cpu();
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	put_cpu();
+
+	memset(vfp, 0, sizeof(union vfp_state));
+
+	vfp->hard.fpexc = FPEXC_EN;
+	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+	vfp->hard.cpu = NR_CPUS;
+#endif
 }
 
 static void vfp_thread_exit(struct thread_info *thread)
@@ -73,8 +118,8 @@ static void vfp_thread_exit(struct thread_info *thread)
 	union vfp_state *vfp = &thread->vfpstate;
 	unsigned int cpu = get_cpu();
 
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	put_cpu();
 }
 
@@ -84,6 +129,9 @@ static void vfp_thread_copy(struct thread_info *thread)
 
 	vfp_sync_hwstate(parent);
 	thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+	thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
 }
 
 /*
@@ -129,17 +177,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * case the thread migrates to a different CPU. The
 		 * restoring is done lazily.
 		 */
-		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-			vfp_save_state(last_VFP_context[cpu], fpexc);
-			last_VFP_context[cpu]->hard.cpu = cpu;
-		}
-		/*
-		 * Thread migration, just force the reloading of the
-		 * state on the new CPU in case the VFP registers
-		 * contain stale data.
-		 */
-		if (thread->vfpstate.hard.cpu != cpu)
-			last_VFP_context[cpu] = NULL;
+		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 #endif
 
 		/*
@@ -415,7 +454,7 @@ static int vfp_pm_suspend(void)
 	}
 
 	/* clear any information we had about last context state */
-	memset(last_VFP_context, 0, sizeof(last_VFP_context));
+	memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
 
 	return 0;
 }
@@ -443,15 +482,15 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
 
-	/*
-	 * If the thread we're interested in is the current owner of the
-	 * hardware VFP state, then we need to save its state.
-	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_state_in_hw(cpu, thread)) {
 		u32 fpexc = fmrx(FPEXC);
 
 		/*
@@ -465,36 +504,13 @@ void vfp_sync_hwstate(struct thread_info *thread)
 	put_cpu();
 }
 
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
 
-	/*
-	 * If the thread we're interested in is the current owner of the
-	 * hardware VFP state, then we need to save its state.
-	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
-		u32 fpexc = fmrx(FPEXC);
-
-		fmxr(FPEXC, fpexc & ~FPEXC_EN);
-
-		/*
-		 * Set the context to NULL to force a reload the next time
-		 * the thread uses the VFP.
-		 */
-		last_VFP_context[cpu] = NULL;
-	}
+	vfp_force_reload(cpu, thread);
 
-#ifdef CONFIG_SMP
-	/*
-	 * For SMP we still have to take care of the case where the thread
-	 * migrates to another CPU and then back to the original CPU on which
-	 * the last VFP user is still the same thread. Mark the thread VFP
-	 * state as belonging to a non-existent CPU so that the saved one will
-	 * be reloaded in the above case.
-	 */
-	thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
 	put_cpu();
 }
 
@@ -513,8 +529,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
 	void *hcpu)
 {
 	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
-		unsigned int cpu = (long)hcpu;
-		last_VFP_context[cpu] = NULL;
+		vfp_force_reload((long)hcpu, current_thread_info());
 	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		vfp_enable(NULL);
 	return NOTIFY_OK;
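
For readers skimming the diff, the essence of the change is the ownership test implemented by vfp_state_in_hw() and vfp_force_reload(): a per-CPU pointer records which thread's state was last loaded into the VFP hardware, and on SMP a per-thread CPU stamp catches the case where the thread migrated away and back, so stale hardware registers are not trusted. The code below is a minimal, self-contained user-space sketch of that logic under stated assumptions, not the kernel code itself: struct vfp_state_model, hw_owner[] and NCPUS are hypothetical stand-ins for union vfp_state, vfp_current_hw_state[] and NR_CPUS, and it models only the SMP path (on UP the CPU-stamp check is compiled out).

/*
 * Toy model of the ownership check added by this patch (illustrative
 * user-space C, not kernel code). hw_owner[] plays the role of
 * vfp_current_hw_state[]; ->cpu plays the role of vfpstate.hard.cpu.
 */
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4				/* stand-in for NR_CPUS */

struct vfp_state_model {
	unsigned int cpu;		/* CPU this state was last loaded on */
};

static struct vfp_state_model *hw_owner[NCPUS];

/* Mirrors vfp_state_in_hw(): owning the per-CPU slot is not enough on
 * SMP -- the state must also have been last loaded on this very CPU. */
static bool state_in_hw(unsigned int cpu, struct vfp_state_model *st)
{
	if (st->cpu != cpu)
		return false;
	return hw_owner[cpu] == st;
}

/* Mirrors vfp_force_reload(): drop hardware ownership (the kernel also
 * clears FPEXC_EN) and stamp "no CPU" so the next use reloads from memory. */
static void force_reload(unsigned int cpu, struct vfp_state_model *st)
{
	if (state_in_hw(cpu, st))
		hw_owner[cpu] = NULL;
	st->cpu = NCPUS;
}

int main(void)
{
	struct vfp_state_model t = { .cpu = NCPUS };

	hw_owner[1] = &t;		/* thread loads its VFP state on CPU 1 */
	t.cpu = 1;
	printf("valid on cpu1: %d\n", state_in_hw(1, &t));		/* 1 */

	t.cpu = 2;			/* migrated to CPU 2 and back: hw regs stale */
	printf("valid on cpu1 after migration: %d\n", state_in_hw(1, &t)); /* 0 */

	force_reload(1, &t);
	printf("valid after force_reload: %d\n", state_in_hw(1, &t));	/* 0 */
	return 0;
}

Run stand-alone, it prints 1, 0, 0: the valid case the assembly labels vfp_hw_state_valid, the migrated case that falls through to vfp_reload_hw, and a context that has been explicitly flushed.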