Diffstat (limited to 'arch/arm/vfp')
 arch/arm/vfp/entry.S     | 23 ++++++++++++++-----
 arch/arm/vfp/vfp.h       |  2 --
 arch/arm/vfp/vfphw.S     | 14 ++++++++++--
 arch/arm/vfp/vfpmodule.c | 67 ++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 96 insertions(+), 10 deletions(-)
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index ba592a9e6fb..a2bed62aec2 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -15,13 +15,16 @@
* r10 = thread_info structure
* lr = failure return
*/
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-#include <asm/assembler.h>
+#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
+#include "../kernel/entry-header.S"
ENTRY(do_vfp)
+#ifdef CONFIG_PREEMPT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ add r11, r4, #1 @ increment it
+ str r11, [r10, #TI_PREEMPT]
+#endif
enable_irq
ldr r4, .LCvfp
ldr r11, [r10, #TI_CPU] @ CPU number
@@ -30,6 +33,12 @@ ENTRY(do_vfp)
ENDPROC(do_vfp)
ENTRY(vfp_null_entry)
+#ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+ str r11, [r10, #TI_PREEMPT]
+#endif
mov pc, lr
ENDPROC(vfp_null_entry)
@@ -41,6 +50,12 @@ ENDPROC(vfp_null_entry)
__INIT
ENTRY(vfp_testing_entry)
+#ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+ str r11, [r10, #TI_PREEMPT]
+#endif
ldr r0, VFP_arch_address
str r5, [r0] @ known non-zero value
mov pc, r9 @ we have handled the fault
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 8de86e4fead..c8c98dd44ad 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -377,6 +377,4 @@ struct op {
u32 flags;
};
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
extern void vfp_save_state(void *location, u32 fpexc);
-#endif
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index c92a08bd6a8..83c4e384b16 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -137,6 +137,12 @@ check_for_exception:
VFPFMXR FPEXC, r1 @ restore FPEXC last
sub r2, r2, #4
str r2, [sp, #S_PC] @ retry the instruction
+#ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+ str r11, [r10, #TI_PREEMPT]
+#endif
mov pc, r9 @ we think we have handled things
@@ -155,6 +161,12 @@ look_for_VFP_exceptions:
@ not recognised by VFP
DBGSTR "not VFP"
+#ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+ str r11, [r10, #TI_PREEMPT]
+#endif
mov pc, lr
process_exception:
@@ -172,7 +184,6 @@ process_exception:
@ retry the faulted instruction
ENDPROC(vfp_support_entry)
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
ENTRY(vfp_save_state)
@ Save the current VFP state
@ r0 - save location
@@ -190,7 +201,6 @@ ENTRY(vfp_save_state)
stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
mov pc, lr
ENDPROC(vfp_save_state)
-#endif
last_VFP_context_address:
.word last_VFP_context
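Making vfp_save_state unconditional here (matching the vfp.h hunk above)
is what lets the new vfp_sync_state() in vfpmodule.c below call it in
every configuration, not only CONFIG_SMP/CONFIG_PM kernels. For
orientation, a sketch of the save area the routine fills, inferred from
the stmia comment above; the field names and exact layout are
assumptions, not the kernel's vfp_hard_struct definition:

    #include <stdint.h>

    struct vfp_save_area {            /* written starting at r0 */
            uint64_t fpregs[16];      /* d0-d15 data registers */
            uint32_t fpexc, fpscr;    /* control and status */
            uint32_t fpinst, fpinst2; /* bounced instruction words */
    };                                /* the final stmia stores the
                                         last four words together */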
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 9f476a1be2c..01599c4ef72 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -266,7 +266,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* on VFP subarch 1.
*/
vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
- return;
+ goto exit;
}
/*
@@ -297,7 +297,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
- return;
+ goto exit;
/*
* The barrier() here prevents fpinst2 being read
@@ -310,6 +310,8 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ exit:
+ preempt_enable();
}
static void vfp_enable(void *unused)
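The two return-to-goto conversions above exist because VFP_bounce() now
runs with the preempt count already raised by do_vfp: every path out of
the function must reach the new exit label so that preempt_enable()
runs exactly once. Reduced to its shape (the function and its
parameters are illustrative; only preempt_enable() is the real kernel
API):

    void preempt_enable(void);       /* kernel API, declared for the sketch */

    void bounce_sketch(int exception_error, int fp2v_invalid)
    {
            if (exception_error)
                    goto exit;       /* was "return": leaked the count */
            if (fp2v_invalid)
                    goto exit;
            /* ... emulate the trapped instruction ... */
    exit:
            preempt_enable();        /* pairs with the increment in do_vfp */
    }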
@@ -377,6 +379,55 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
+/*
+ * Synchronise the hardware VFP state of a thread other than current with the
+ * saved one. This function is used by the ptrace mechanism.
+ */
+#ifdef CONFIG_SMP
+void vfp_sync_state(struct thread_info *thread)
+{
+ /*
+ * On SMP systems, the VFP state is automatically saved at every
+ * context switch. We mark the thread VFP state as belonging to a
+ * non-existent CPU so that the saved one will be reloaded when
+ * needed.
+ */
+ thread->vfpstate.hard.cpu = NR_CPUS;
+}
+#else
+void vfp_sync_state(struct thread_info *thread)
+{
+ unsigned int cpu = get_cpu();
+ u32 fpexc = fmrx(FPEXC);
+
+ /*
+ * If VFP is enabled, the previous state was already saved and
+ * last_VFP_context updated.
+ */
+ if (fpexc & FPEXC_EN)
+ goto out;
+
+ if (!last_VFP_context[cpu])
+ goto out;
+
+ /*
+ * Save the last VFP state on this CPU.
+ */
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(last_VFP_context[cpu], fpexc);
+ fmxr(FPEXC, fpexc);
+
+ /*
+ * Set the context to NULL to force a reload the next time the thread
+ * uses the VFP.
+ */
+ last_VFP_context[cpu] = NULL;
+
+out:
+ put_cpu();
+}
+#endif
+
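As the comment says, vfp_sync_state() exists for ptrace: before a
tracer reads a stopped thread's vfpstate, the live hardware registers
must be flushed to memory (UP) or the saved copy marked stale (SMP). A
hedged sketch of the caller side; the getter, its name, and its
signature are assumptions, not part of this diff:

    /* Kernel context assumed. Only vfp_sync_state(), task_thread_info()
     * and thread->vfpstate come from the kernel. */
    static int ptrace_getvfpregs_sketch(struct task_struct *tsk, void *kbuf)
    {
            struct thread_info *thread = task_thread_info(tsk);

            vfp_sync_state(thread);  /* make the saved state trustworthy */
            memcpy(kbuf, &thread->vfpstate.hard,
                   sizeof(thread->vfpstate.hard));
            return 0;
    }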
#include <linux/smp.h>
/*
@@ -427,6 +478,18 @@ static int __init vfp_init(void)
* in place; report VFP support to userspace.
*/
elf_hwcap |= HWCAP_VFP;
+#ifdef CONFIG_VFPv3
+ if (VFP_arch >= 3) {
+ elf_hwcap |= HWCAP_VFPv3;
+
+ /*
+ * Check for VFPv3 D16. CPUs in this configuration
+ * only have 16 x 64bit registers.
+ */
+ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
+ elf_hwcap |= HWCAP_VFPv3D16;
+ }
+#endif
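The MVFR0 test above reads the Media and VFP Feature Register 0. Per
the ARM Architecture Reference Manual, its low nibble ("A_SIMD
registers") reads 1 on implementations with sixteen 64-bit registers
and 2 on those with thirty-two, which is presumably what
MVFR0_A_SIMD_MASK isolates. A decoding sketch under that assumption:

    #include <stdint.h>

    static int vfp_d16_only(uint32_t mvfr0)
    {
            /* assumes MVFR0_A_SIMD_MASK == 0xf */
            return (mvfr0 & 0xf) == 1;  /* 1: d0-d15 only; 2: d0-d31 */
    }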
#ifdef CONFIG_NEON
/*
* Check for the presence of the Advanced SIMD