MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus
author Jim Quinlan <jim2101024@gmail.com>
Thu, 6 Sep 2012 15:36:56 +0000 (11:36 -0400)
committer Ralf Baechle <ralf@linux-mips.org>
Fri, 9 Nov 2012 09:59:21 +0000 (10:59 +0100)
For non-MIPSr2 processors, such as the BMIPS 5000, calls to
arch_local_irq_disable() and related functions may be preempted
between the read and the write-back of c0_status, in which case a
stale value may be restored to c0_status.  This fix disables
preemption around that sequence on such processors and re-enables
it afterwards.
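
In outline, the race looks like this (an illustrative sketch only,
written with the read_c0_status()/write_c0_status() accessors from
<asm/mipsregs.h>; the actual sequence is the mfc0/mtc0 asm macro in
the diff below):

	unsigned long s = read_c0_status();	/* task A reads Status */
						/* preempted here: another
						   task runs and changes
						   c0_status */
	write_c0_status(s & ~ST0_IE);		/* task A writes back the
						   stale value, losing the
						   other task's update */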

Those functions that needed this fix have been "outlined" to
mips-atomic.c, as they are no longer good candidates for inlining.

This bug was observed on a BMIPS 5000, occurring once every few hours
in a continuous reboot test.  It was traced to the write_lock_irq()
function, which was being invoked in release_task() in exit.c.  By
placing a number of "nops" in between the mfc0/mtc0 pair in
arch_local_irq_disable(), which is called by write_lock_irq(), we
were able to greatly increase the occurrence of this bug.  Similarly,
applying this commit silenced the bug.
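
For reference, the widened window looked roughly like this
(illustrative only, not part of this patch; the nop count was varied
by hand):

	__asm__ __volatile__(
		"	.set	push			\n"
		"	.set	noat			\n"
		"	mfc0	$1, $12			\n" /* read c0_status */
		"	nop				\n" /* nops widen the  */
		"	nop				\n" /* mfc0/mtc0 race  */
		"	nop				\n" /* window          */
		"	ori	$1, 0x1f		\n"
		"	xori	$1, 0x1f		\n" /* clear IE (and EXL/ERL) */
		"	mtc0	$1, $12			\n" /* write back, possibly stale */
		"	.set	pop			\n"
		: /* no outputs */
		: /* no inputs */
		: "memory");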

Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: David Daney <ddaney.cavm@gmail.com>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Jim Quinlan <jim2101024@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/4321/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/include/asm/irqflags.h
arch/mips/lib/Makefile
arch/mips/lib/mips-atomic.c [new file with mode: 0644]

index 309cbcd6909cd808b92941e53deea0e3e3a5a756..9f3384c789d7bfdff5511706b0dffc3d2962c96c 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-       "       .macro  arch_local_irq_enable                           \n"
-       "       .set    push                                            \n"
-       "       .set    reorder                                         \n"
-       "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
-       "       ori     $1, 0x400                                       \n"
-       "       xori    $1, 0x400                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
-       "       ei                                                      \n"
-#else
-       "       mfc0    $1,$12                                          \n"
-       "       ori     $1,0x1f                                         \n"
-       "       xori    $1,0x1e                                         \n"
-       "       mtc0    $1,$12                                          \n"
-#endif
-       "       irq_enable_hazard                                       \n"
-       "       .set    pop                                             \n"
-       "       .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of call overhead on each local_irq_enable()
-        */
-       smtc_ipi_replay();
-#endif
-       __asm__ __volatile__(
-               "arch_local_irq_enable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
        "       .macro  arch_local_irq_disable\n"
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    $1, $2, 1                                       \n"
-       "       ori     $1, 0x400                                       \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
        "       di                                                      \n"
-#else
-       "       mfc0    $1,$12                                          \n"
-       "       ori     $1,0x1f                                         \n"
-       "       xori    $1,0x1f                                         \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1,$12                                          \n"
-#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
                : "memory");
 }
 
-__asm__(
-       "       .macro  arch_local_save_flags flags                     \n"
-       "       .set    push                                            \n"
-       "       .set    reorder                                         \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\flags, $2, 1                                  \n"
-#else
-       "       mfc0    \\flags, $12                                    \n"
-#endif
-       "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_save_flags %0" : "=r" (flags));
-       return flags;
-}
 
 __asm__(
        "       .macro  arch_local_irq_save result                      \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\result, $2, 1                                 \n"
-       "       ori     $1, \\result, 0x400                             \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-       "       andi    \\result, \\result, 0x400                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
        "       di      \\result                                        \n"
        "       andi    \\result, 1                                     \n"
-#else
-       "       mfc0    \\result, $12                                   \n"
-       "       ori     $1, \\result, 0x1f                              \n"
-       "       xori    $1, 0x1f                                        \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $12                                         \n"
-#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
        return flags;
 }
 
+
 __asm__(
        "       .macro  arch_local_irq_restore flags                    \n"
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "mfc0   $1, $2, 1                                               \n"
-       "andi   \\flags, 0x400                                          \n"
-       "ori    $1, 0x400                                               \n"
-       "xori   $1, 0x400                                               \n"
-       "or     \\flags, $1                                             \n"
-       "mtc0   \\flags, $2, 1                                          \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
        /*
         * Slow, but doesn't suffer from a relatively unlikely race
         * condition we're having since days 1.
         */
        "       beqz    \\flags, 1f                                     \n"
-       "        di                                                     \n"
+       "       di                                                      \n"
        "       ei                                                      \n"
        "1:                                                             \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
        /*
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
        "       ins     $1, \\flags, 0, 1                               \n"
        "       mtc0    $1, $12                                         \n"
-#else
-       "       mfc0    $1, $12                                         \n"
-       "       andi    \\flags, 1                                      \n"
-       "       ori     $1, 0x1f                                        \n"
-       "       xori    $1, 0x1f                                        \n"
-       "       or      \\flags, $1                                     \n"
-       "       mtc0    \\flags, $12                                    \n"
 #endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
        unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of branch and call overhead on each
-        * local_irq_restore()
-        */
-       if (unlikely(!(flags & 0x0400)))
-               smtc_ipi_replay();
-#endif
-
        __asm__ __volatile__(
                "arch_local_irq_restore\t%0"
                : "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
                : "0" (flags)
                : "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+       "       .macro  arch_local_irq_enable                           \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       "       ei                                                      \n"
+#else
+       "       mfc0    $1,$12                                          \n"
+       "       ori     $1,0x1f                                         \n"
+       "       xori    $1,0x1e                                         \n"
+       "       mtc0    $1,$12                                          \n"
+#endif
+       "       irq_enable_hazard                                       \n"
+       "       .set    pop                                             \n"
+       "       .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of call overhead on each local_irq_enable()
+        */
+       smtc_ipi_replay();
+#endif
+       __asm__ __volatile__(
+               "arch_local_irq_enable"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory");
+}
+
+
+__asm__(
+       "       .macro  arch_local_save_flags flags                     \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\flags, $2, 1                                  \n"
+#else
+       "       mfc0    \\flags, $12                                    \n"
+#endif
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
+       asm volatile("arch_local_save_flags %0" : "=r" (flags));
+       return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
index a7b893714354547c1b4a875142edeb23b9c1e6b8..eeddc58802e11a57595ee7d123e0538577af0c67 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,7 +3,8 @@
 #
 
 lib-y  += bitops.o csum_partial.o delay.o memcpy.o memset.o \
-          strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+          mips-atomic.o strlen_user.o strncpy_user.o \
+          strnlen_user.o uncached.o
 
 obj-y                  += iomap.o
 obj-$(CONFIG_PCI)      += iomap-pci.o
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 0000000..e091430
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+       "       .macro  arch_local_irq_disable\n"
+       "       .set    push                                            \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1                                       \n"
+       "       ori     $1, 0x400                                       \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1,$12                                          \n"
+       "       ori     $1,0x1f                                         \n"
+       "       xori    $1,0x1f                                         \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1,$12                                          \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+void arch_local_irq_disable(void)
+{
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_disable"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+       "       .macro  arch_local_irq_save result                      \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\result, $2, 1                                 \n"
+       "       ori     $1, \\result, 0x400                             \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+       "       andi    \\result, \\result, 0x400                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    \\result, $12                                   \n"
+       "       ori     $1, \\result, 0x1f                              \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $12                                         \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+       preempt_disable();
+       asm volatile("arch_local_irq_save\t%0"
+                    : "=r" (flags)
+                    : /* no inputs */
+                    : "memory");
+       preempt_enable();
+       return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+       "       .macro  arch_local_irq_restore flags                    \n"
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "mfc0   $1, $2, 1                                               \n"
+       "andi   \\flags, 0x400                                          \n"
+       "ori    $1, 0x400                                               \n"
+       "xori   $1, 0x400                                               \n"
+       "or     \\flags, $1                                             \n"
+       "mtc0   \\flags, $2, 1                                          \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+       /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1, $12                                         \n"
+       "       andi    \\flags, 1                                      \n"
+       "       ori     $1, 0x1f                                        \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       or      \\flags, $1                                     \n"
+       "       mtc0    \\flags, $12                                    \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of branch and call overhead on each
+        * local_irq_restore()
+        */
+       if (unlikely(!(flags & 0x0400)))
+               smtc_ipi_replay();
+#endif
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_restore\t%0"
+               : "=r" (__tmp1)
+               : "0" (flags)
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+void __arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_restore\t%0"
+               : "=r" (__tmp1)
+               : "0" (flags)
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */