author    Jim Quinlan <jim2101024@gmail.com>  2012-09-06 11:36:56 -0400
committer Ralf Baechle <ralf@linux-mips.org>  2012-11-09 10:59:21 +0100
commit    e97c5b609880d97313b13eb71830fca62cee50c2 (patch)
tree      d43e3033f37cc9181dfe145b1019237429cfb418 /arch
parent    92d11594f688c8b55b51e80f2eac4417396237a4 (diff)
MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus
For non-MIPSr2 processors, such as the BMIPS 5000, calls to
arch_local_irq_disable() and others may be preempted, and in doing so a
stale value may be restored to c0_status. This fix disables preemption
for such processors prior to the call and enables it after the call.

Those functions that needed this fix have been "outlined" to
mips-atomic.c, as they are no longer good candidates for inlining.

This bug was observed on a BMIPS 5000, occurring once every few hours
in a continuous reboot test. It was traced to the write_lock_irq()
function, which was being invoked in release_task() in exit.c. By
placing a number of "nops" between the mfc0/mtc0 pair in
arch_local_irq_disable(), which is called by write_lock_irq(), we were
able to greatly increase the occurrence of this bug. Similarly, the
application of this commit silenced the bug.

Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: David Daney <ddaney.cavm@gmail.com>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Jim Quinlan <jim2101024@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/4321/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
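[Editor's note: to make the race concrete, here is a minimal C sketch,
not part of the patch. The CP0 Status accessors and ST0_IE mirror the
kernel's <asm/mipsregs.h> helpers but are declared locally so the
sketch stands alone; the exact preemption point the commit closes is
marked in a comment.]

    /* Sketch only: stand-ins for the kernel's CP0 Status accessors
     * (<asm/mipsregs.h>); on real hardware these are mfc0/mtc0. */
    extern unsigned long read_c0_status(void);
    extern void write_c0_status(unsigned long val);
    extern void preempt_disable(void);
    extern void preempt_enable(void);

    #define ST0_IE 0x00000001	/* interrupt-enable bit in CP0 Status */

    /* Pre-R2 CPUs have no atomic "di": masking IRQs is a two-step
     * read-modify-write of c0_status.  If the task is preempted
     * between the two steps, whatever another task did to c0_status
     * in the meantime is lost when the stale value is written back. */
    static void irq_disable_racy(void)
    {
    	unsigned long status;

    	status = read_c0_status();		/* mfc0 */
    	/* <-- preemption window: another task may run here and
    	 *     change c0_status before "status" is written back */
    	write_c0_status(status & ~ST0_IE);	/* mtc0: stale on race */
    }

    /* The fix: forbid rescheduling across the mfc0/mtc0 pair, which
     * is what the outlined mips-atomic.c helpers below do. */
    static void irq_disable_safe(void)
    {
    	unsigned long status;

    	preempt_disable();
    	status = read_c0_status();
    	write_c0_status(status & ~ST0_IE);
    	preempt_enable();
    }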
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/include/asm/irqflags.h | 207
-rw-r--r--  arch/mips/lib/Makefile           |   3
-rw-r--r--  arch/mips/lib/mips-atomic.c      | 176
3 files changed, 253 insertions(+), 133 deletions(-)
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909..9f3384c789d 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
#include <linux/compiler.h>
#include <asm/hazards.h>
-__asm__(
- " .macro arch_local_irq_enable \n"
- " .set push \n"
- " .set reorder \n"
- " .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
- " ori $1, 0x400 \n"
- " xori $1, 0x400 \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
- " ei \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1e \n"
- " mtc0 $1,$12 \n"
-#endif
- " irq_enable_hazard \n"
- " .set pop \n"
- " .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of call overhead on each local_irq_enable()
- */
- smtc_ipi_replay();
-#endif
- __asm__ __volatile__(
- "arch_local_irq_enable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 \n"
- " ori $1, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1f \n"
- " .set noreorder \n"
- " mtc0 $1,$12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
: "memory");
}
-__asm__(
- " .macro arch_local_save_flags flags \n"
- " .set push \n"
- " .set reorder \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\flags, $2, 1 \n"
-#else
- " mfc0 \\flags, $12 \n"
-#endif
- " .set pop \n"
- " .endm \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
- unsigned long flags;
- asm volatile("arch_local_save_flags %0" : "=r" (flags));
- return flags;
-}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\result, $2, 1 \n"
- " ori $1, \\result, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
- " andi \\result, \\result, 0x400 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
-#else
- " mfc0 \\result, $12 \n"
- " ori $1, \\result, 0x1f \n"
- " xori $1, 0x1f \n"
- " .set noreorder \n"
- " mtc0 $1, $12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
return flags;
}
+
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- "mfc0 $1, $2, 1 \n"
- "andi \\flags, 0x400 \n"
- "ori $1, 0x400 \n"
- "xori $1, 0x400 \n"
- "or \\flags, $1 \n"
- "mtc0 \\flags, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1.
*/
" beqz \\flags, 1f \n"
- " di \n"
+ " di \n"
" ei \n"
"1: \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
-#else
- " mfc0 $1, $12 \n"
- " andi \\flags, 1 \n"
- " ori $1, 0x1f \n"
- " xori $1, 0x1f \n"
- " or \\flags, $1 \n"
- " mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
-
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of branch and call overhead on each
- * local_irq_restore()
- */
- if (unlikely(!(flags & 0x0400)))
- smtc_ipi_replay();
-#endif
-
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
}
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+ " .macro arch_local_irq_enable \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ " ei \n"
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1e \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_enable_hazard \n"
+ " .set pop \n"
+ " .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
+ __asm__ __volatile__(
+ "arch_local_irq_enable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+}
+
+
+__asm__(
+ " .macro arch_local_save_flags flags \n"
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\flags, $2, 1 \n"
+#else
+ " mfc0 \\flags, $12 \n"
+#endif
+ " .set pop \n"
+ " .endm \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("arch_local_save_flags %0" : "=r" (flags));
+ return flags;
+}
+
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#endif
}
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
/*
* Do the CPU's IRQ-state tracing from assembly code.
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index a7b89371435..eeddc58802e 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,7 +3,8 @@
#
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+ mips-atomic.o strlen_user.o strncpy_user.o \
+ strnlen_user.o uncached.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 00000000000..e091430dbeb
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+ " .macro arch_local_irq_disable\n"
+ " .set push \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 \n"
+ " ori $1, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+void arch_local_irq_disable(void)
+{
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_disable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+ " .macro arch_local_irq_save result \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\result, $2, 1 \n"
+ " ori $1, \\result, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+ " andi \\result, \\result, 0x400 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 \\result, $12 \n"
+ " ori $1, \\result, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ preempt_disable();
+ asm volatile("arch_local_irq_save\t%0"
+ : "=r" (flags)
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+ " .macro arch_local_irq_restore flags \n"
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ "mfc0 $1, $2, 1 \n"
+ "andi \\flags, 0x400 \n"
+ "ori $1, 0x400 \n"
+ "xori $1, 0x400 \n"
+ "or \\flags, $1 \n"
+ "mtc0 \\flags, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+ /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1, $12 \n"
+ " andi \\flags, 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " or \\flags, $1 \n"
+ " mtc0 \\flags, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of branch and call overhead on each
+ * local_irq_restore()
+ */
+ if (unlikely(!(flags & 0x0400)))
+ smtc_ipi_replay();
+#endif
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+void __arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */