Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                              |   2
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-l2c.c   |   1
-rw-r--r--  arch/mips/fw/arc/misc.c                        |   1
-rw-r--r--  arch/mips/include/asm/Kbuild                   |   1
-rw-r--r--  arch/mips/include/asm/bitops.h                 | 128
-rw-r--r--  arch/mips/include/asm/compat.h                 |   2
-rw-r--r--  arch/mips/include/asm/delay.h                  |   6
-rw-r--r--  arch/mips/include/asm/io.h                     |   1
-rw-r--r--  arch/mips/include/asm/irqflags.h               | 207
-rw-r--r--  arch/mips/include/asm/pgtable-64.h             |  15
-rw-r--r--  arch/mips/include/asm/processor.h              |   2
-rw-r--r--  arch/mips/include/asm/ptrace.h                 |   6
-rw-r--r--  arch/mips/include/asm/thread_info.h            |   6
-rw-r--r--  arch/mips/include/asm/unistd.h                 |   1
-rw-r--r--  arch/mips/jz4740/serial.h                      |   3
-rw-r--r--  arch/mips/kernel/entry.S                       |   6
-rw-r--r--  arch/mips/kernel/linux32.c                     |  21
-rw-r--r--  arch/mips/kernel/mips_ksyms.c                  |   2
-rw-r--r--  arch/mips/kernel/process.c                     |  62
-rw-r--r--  arch/mips/kernel/scall64-n32.S                 |   2
-rw-r--r--  arch/mips/kernel/scall64-o32.S                 |   2
-rw-r--r--  arch/mips/kernel/setup.c                       |  26
-rw-r--r--  arch/mips/kernel/smp-cmp.c                     |   2
-rw-r--r--  arch/mips/kernel/syscall.c                     |  53
-rw-r--r--  arch/mips/lib/Makefile                         |   5
-rw-r--r--  arch/mips/lib/bitops.c                         | 179
-rw-r--r--  arch/mips/lib/delay.c                          |   6
-rw-r--r--  arch/mips/lib/dump_tlb.c                       |   4
-rw-r--r--  arch/mips/lib/mips-atomic.c                    | 176
-rw-r--r--  arch/mips/mm/tlb-r4k.c                         |   1
-rw-r--r--  arch/mips/mm/tlbex.c                           |  56
-rw-r--r--  arch/mips/mti-malta/malta-platform.c           |   3
32 files changed, 606 insertions(+), 382 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dba9390d37c..4183e62f178 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -40,6 +40,8 @@ config MIPS
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA if 64BIT
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
menu "Machine selection"
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index d38246e33dd..9f883bf7695 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
* measurement, and debugging facilities.
*/
+#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c
index 7cf80ca2c1d..f9f5307434c 100644
--- a/arch/mips/fw/arc/misc.c
+++ b/arch/mips/fw/arc/misc.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/irqflags.h>
#include <asm/bcache.h>
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index e69de29bb2d..533053d12ce 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -0,0 +1 @@
+# MIPS headers
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35ce2b4..46ac73abd5e 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
#endif
#include <linux/compiler.h>
-#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
@@ -44,6 +43,24 @@
#define smp_mb__before_clear_bit() smp_mb__before_llsc()
#define smp_mb__after_clear_bit() smp_llsc_mb()
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr);
+
+
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@@ -57,7 +74,7 @@
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_set_bit(nr, addr);
}
/*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a &= ~mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_clear_bit(nr, addr);
}
/*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a ^= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_change_bit(nr, addr);
}
/*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_set_bit(nr, addr);
smp_llsc_mb();
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_set_bit_lock(nr, addr);
smp_llsc_mb();
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a &= ~mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_clear_bit(nr, addr);
smp_llsc_mb();
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a ^= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_change_bit(nr, addr);
smp_llsc_mb();
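
The fallback bodies move out of line to lib/bitops.c so that bitops.h no
longer needs <linux/irqflags.h>; the other hunks in this patch (cvmx-l2c.c,
fw/arc/misc.c, io.h) add that include directly where it used to arrive
transitively through bitops.h. A minimal sketch of an affected caller
(example() is hypothetical):

    #include <linux/irqflags.h>   /* no longer pulled in via asm/bitops.h */
    #include <linux/bitops.h>

    static void example(volatile unsigned long *word)
    {
            unsigned long flags;

            local_irq_save(flags);      /* needs irqflags.h directly now */
            __set_bit(0, word);         /* non-atomic variant, IRQs off  */
            local_irq_restore(flags);
    }
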
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 58277e0e9cd..3c5d1464b7b 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
static inline int is_compat_task(void)
{
- return test_thread_flag(TIF_32BIT);
+ return test_thread_flag(TIF_32BIT_ADDR);
}
#endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h
index e7cd78277c2..dc0a5f77a35 100644
--- a/arch/mips/include/asm/delay.h
+++ b/arch/mips/include/asm/delay.h
@@ -13,9 +13,9 @@
#include <linux/param.h>
-extern void __delay(unsigned int loops);
-extern void __ndelay(unsigned int ns);
-extern void __udelay(unsigned int us);
+extern void __delay(unsigned long loops);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
#define ndelay(ns) __ndelay(ns)
#define udelay(us) __udelay(us)
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23c20c..ff2e0345e01 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/bug.h>
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909..9f3384c789d 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
#include <linux/compiler.h>
#include <asm/hazards.h>
-__asm__(
- " .macro arch_local_irq_enable \n"
- " .set push \n"
- " .set reorder \n"
- " .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
- " ori $1, 0x400 \n"
- " xori $1, 0x400 \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
- " ei \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1e \n"
- " mtc0 $1,$12 \n"
-#endif
- " irq_enable_hazard \n"
- " .set pop \n"
- " .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of call overhead on each local_irq_enable()
- */
- smtc_ipi_replay();
-#endif
- __asm__ __volatile__(
- "arch_local_irq_enable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 \n"
- " ori $1, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1f \n"
- " .set noreorder \n"
- " mtc0 $1,$12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
: "memory");
}
-__asm__(
- " .macro arch_local_save_flags flags \n"
- " .set push \n"
- " .set reorder \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\flags, $2, 1 \n"
-#else
- " mfc0 \\flags, $12 \n"
-#endif
- " .set pop \n"
- " .endm \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
- unsigned long flags;
- asm volatile("arch_local_save_flags %0" : "=r" (flags));
- return flags;
-}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\result, $2, 1 \n"
- " ori $1, \\result, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
- " andi \\result, \\result, 0x400 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
-#else
- " mfc0 \\result, $12 \n"
- " ori $1, \\result, 0x1f \n"
- " xori $1, 0x1f \n"
- " .set noreorder \n"
- " mtc0 $1, $12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
return flags;
}
+
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- "mfc0 $1, $2, 1 \n"
- "andi \\flags, 0x400 \n"
- "ori $1, 0x400 \n"
- "xori $1, 0x400 \n"
- "or \\flags, $1 \n"
- "mtc0 \\flags, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1.
*/
" beqz \\flags, 1f \n"
- " di \n"
+ " di \n"
" ei \n"
"1: \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
-#else
- " mfc0 $1, $12 \n"
- " andi \\flags, 1 \n"
- " ori $1, 0x1f \n"
- " xori $1, 0x1f \n"
- " or \\flags, $1 \n"
- " mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
-
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of branch and call overhead on each
- * local_irq_restore()
- */
- if (unlikely(!(flags & 0x0400)))
- smtc_ipi_replay();
-#endif
-
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
}
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+ " .macro arch_local_irq_enable \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ " ei \n"
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1e \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_enable_hazard \n"
+ " .set pop \n"
+ " .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
+ __asm__ __volatile__(
+ "arch_local_irq_enable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+}
+
+
+__asm__(
+ " .macro arch_local_save_flags flags \n"
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\flags, $2, 1 \n"
+#else
+ " mfc0 \\flags, $12 \n"
+#endif
+ " .set pop \n"
+ " .endm \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("arch_local_save_flags %0" : "=r" (flags));
+ return flags;
+}
+
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#endif
}
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
/*
* Do the CPU's IRQ-state tracing from assembly code.
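
The reshuffle keeps arch_local_irq_disable/save/restore inline only where
MIPSR2's atomic di/ei instructions make them a couple of instructions;
every other configuration now calls out of line into lib/mips-atomic.c
(added later in this patch). Callers are unchanged either way; a hedged
usage sketch (critical_update() is hypothetical):

    #include <linux/irqflags.h>

    /* local_irq_save() expands to arch_local_irq_save() from this header */
    static void critical_update(unsigned long *counter)
    {
            unsigned long flags;

            local_irq_save(flags);      /* inline di on MIPSR2, else a call */
            (*counter)++;               /* protected read-modify-write      */
            local_irq_restore(flags);
    }
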
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index c26e1825007..f5b521d5a67 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -9,6 +9,7 @@
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H
+#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/addrspace.h>
@@ -172,7 +173,19 @@ static inline int pmd_none(pmd_t pmd)
return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}
-#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ /* pmd_huge(pmd) but inline */
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return 0;
+#endif
+
+ if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+ return 1;
+
+ return 0;
+}
static inline int pmd_present(pmd_t pmd)
{
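
pmd_bad() must not trip on huge-page PMDs: a huge mapping is a leaf entry
whose flag bits live below PAGE_SHIFT, so the old ~PAGE_MASK test reported
every huge mapping as bad. A worked illustration with hypothetical values,
assuming 4 KB pages and _PAGE_HUGE among the low flag bits:

    /* huge-leaf pmd_val = 0x01e001e5 (low 12 bits are PTE-style flags)
     *   old: 0x01e001e5 & ~PAGE_MASK = 0x1e5 != 0  -> wrongly "bad"
     *   new: _PAGE_HUGE is set, return 0           -> accepted
     * table-pointer pmd_val = 0x81234000 (page aligned)
     *   new: & ~PAGE_MASK == 0                     -> not bad, as before
     */
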
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 5e33fabe354..d28c41e0887 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -310,8 +310,6 @@ struct task_struct;
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
-extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
extern unsigned long thread_saved_pc(struct task_struct *tsk);
/*
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 4f5da948a77..cec5e125f7e 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -61,4 +61,10 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
die(str, regs);
}
+#define current_pt_regs() \
+({ \
+ unsigned long sp = (unsigned long)__builtin_frame_address(0); \
+ (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1; \
+})
+
#endif /* _ASM_PTRACE_H */
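
current_pt_regs() recovers the saved user registers from any kernel-stack
address: it rounds sp up to the top of the THREAD_SIZE-aligned stack, then
steps back over the 32-byte pad at the stack top and one struct pt_regs.
A worked example, assuming THREAD_SIZE = 0x2000 and sp = 0xffffffff80401a30:

    /* sp | (THREAD_SIZE - 1)   = 0xffffffff80401fff   last byte of stack
     *  + 1                     = 0xffffffff80402000   one past the top
     *  - 32                    = 0xffffffff80401fe0   skip the pad area
     *  - sizeof(struct pt_regs)                       -> saved user regs
     */
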
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 8debe9e9175..18806a52061 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 9e47cc11aa2..b306e2081ca 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -20,6 +20,7 @@
#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/mips/jz4740/serial.h b/arch/mips/jz4740/serial.h
index b9fe3ade028..8eb715bb1ea 100644
--- a/arch/mips/jz4740/serial.h
+++ b/arch/mips/jz4740/serial.h
@@ -14,6 +14,9 @@
*/
#ifndef __MIPS_JZ4740_SERIAL_H__
+#define __MIPS_JZ4740_SERIAL_H__
+
+struct uart_port;
void jz4740_serial_out(struct uart_port *p, int offset, int value);
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index a6c13321200..3320cb4ac1d 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -65,6 +65,12 @@ need_resched:
b need_resched
#endif
+FEXPORT(ret_from_kernel_thread)
+ jal schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jal s0
+ j syscall_exit
+
FEXPORT(ret_from_fork)
jal schedule_tail # a0 = struct task_struct *prev
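
ret_from_kernel_thread pairs with the PF_KTHREAD branch added to
copy_thread() below: s0 holds the thread function and s1 its argument.
In rough C (a sketch of the assembly above, not the real code):

    schedule_tail(prev);        /* a0 = prev, finish the context switch */
    fn(arg);                    /* move a0, s1; jal s0                  */
    /* if fn() returns (e.g. after a successful kernel_execve()), fall
     * through to syscall_exit and continue into user space             */
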
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 3a21acedf88..8796dbc7e35 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -3,7 +3,6 @@
*
* Copyright (C) 2000 Silicon Graphics, Inc.
* Written by Ulf Carlsson (ulfc@engr.sgi.com)
- * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com)
*/
#include <linux/compiler.h>
#include <linux/mm.h>
@@ -77,26 +76,6 @@ out:
return error;
}
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs)
-{
- int error;
- struct filename *filename;
-
- filename = getname(compat_ptr(regs.regs[4]));
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = compat_do_execve(filename->name, compat_ptr(regs.regs[5]),
- compat_ptr(regs.regs[6]), &regs);
- putname(filename);
-
-out:
- return error;
-}
-
#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 3fc1691110d..2d9304c2b54 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(kernel_thread);
-
/*
* Functions that operate on entire pages. Mostly used by memory management.
*/
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index e9a5fd7277f..d13720ac656 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -84,6 +84,7 @@ void __noreturn cpu_idle(void)
}
asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
@@ -113,7 +114,7 @@ void flush_thread(void)
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long unused, struct task_struct *p, struct pt_regs *regs)
+ unsigned long arg, struct task_struct *p, struct pt_regs *regs)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
@@ -136,19 +137,30 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
+ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ unsigned long status = p->thread.cp0_status;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ ti->addr_limit = KERNEL_DS;
+ p->thread.reg16 = usp; /* fn */
+ p->thread.reg17 = arg;
+ p->thread.reg29 = childksp;
+ p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+ ((status & (ST0_KUC | ST0_IEC)) << 2);
+#else
+ status |= ST0_EXL;
+#endif
+ childregs->cp0_status = status;
+ return 0;
+ }
*childregs = *regs;
childregs->regs[7] = 0; /* Clear error flag */
-
childregs->regs[2] = 0; /* Child gets zero as return value */
+ childregs->regs[29] = usp;
+ ti->addr_limit = USER_DS;
- if (childregs->cp0_status & ST0_CU0) {
- childregs->regs[28] = (unsigned long) ti;
- childregs->regs[29] = childksp;
- ti->addr_limit = KERNEL_DS;
- } else {
- childregs->regs[29] = usp;
- ti->addr_limit = USER_DS;
- }
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
@@ -156,7 +168,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
* New tasks lose permission to use the fpu. This accelerates context
* switching for most programs since they don't use the fpu.
*/
- p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
#ifdef CONFIG_MIPS_MT_SMTC
@@ -222,35 +233,6 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
}
/*
- * Create a kernel thread
- */
-static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
-{
- do_exit(fn(arg));
-}
-
-long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
-
- regs.regs[4] = (unsigned long) arg;
- regs.regs[5] = (unsigned long) fn;
- regs.cp0_epc = (unsigned long) kernel_thread_helper;
- regs.cp0_status = read_c0_status();
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
- regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
- ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
-#else
- regs.cp0_status |= ST0_EXL;
-#endif
-
- /* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-
-/*
*
*/
struct mips_frame_info {
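
With GENERIC_KERNEL_THREAD the architecture no longer fabricates a pt_regs
for kernel threads; the generic kernel_thread() hands fn and arg down to
copy_thread(), which parks them in reg16/reg17 (s0/s1) for
ret_from_kernel_thread. Callers keep using the usual APIs, e.g. (a hedged
example; worker() and start_worker() are hypothetical):

    #include <linux/kthread.h>

    static int worker(void *data)
    {
            /* runs in a kernel thread created via the generic path */
            return 0;
    }

    static void start_worker(void)
    {
            kthread_run(worker, NULL, "mips-worker");
    }
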
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f6ba8381ee0..d27ca340d46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -167,7 +167,7 @@ EXPORT(sysn32_call_table)
PTR sys_getsockopt
PTR sys_clone /* 6055 */
PTR sys_fork
- PTR sys32_execve
+ PTR compat_sys_execve
PTR sys_exit
PTR compat_sys_wait4
PTR sys_kill /* 6060 */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 53c2d724576..9601be6afa3 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -203,7 +203,7 @@ sys_call_table:
PTR sys_creat
PTR sys_link
PTR sys_unlink /* 4010 */
- PTR sys32_execve
+ PTR compat_sys_execve
PTR sys_chdir
PTR compat_sys_time
PTR sys_mknod
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a53f8ec37aa..290dc6a1d7a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -79,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", };
void __init add_memory_region(phys_t start, phys_t size, long type)
{
int x = boot_mem_map.nr_map;
- struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+ int i;
/* Sanity check */
if (start + size < start) {
@@ -88,15 +88,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
}
/*
- * Try to merge with previous entry if any. This is far less than
- * perfect but is sufficient for most real world cases.
+ * Try to merge with existing entry, if any.
*/
- if (x && prev->addr + prev->size == start && prev->type == type) {
- prev->size += size;
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+ unsigned long top;
+
+ if (entry->type != type)
+ continue;
+
+ if (start + size < entry->addr)
+ continue; /* no overlap */
+
+ if (entry->addr + entry->size < start)
+ continue; /* no overlap */
+
+ top = max(entry->addr + entry->size, start + size);
+ entry->addr = min(entry->addr, start);
+ entry->size = top - entry->addr;
+
return;
}
- if (x == BOOT_MEM_MAP_MAX) {
+ if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
pr_err("Ooops! Too many entries in the memory map!\n");
return;
}
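
The loop now folds a new range into any same-type entry it overlaps or
abuts, taking the union, rather than only extending the immediately
previous entry. Worked example, merging [0x1000, 0x3000) into an existing
entry covering [0x2000, 0x5000):

    /* top         = max(0x2000 + 0x3000, 0x1000 + 0x2000) = 0x5000
     * entry->addr = min(0x2000, 0x1000)                   = 0x1000
     * entry->size = 0x5000 - 0x1000                       = 0x4000
     * -> merged entry covers [0x1000, 0x5000)
     */
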
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index afc379ca375..06cd0c610f4 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -97,7 +97,7 @@ static void cmp_init_secondary(void)
/* Enable per-cpu interrupts: platform specific */
- c->core = (read_c0_ebase() >> 1) & 0xff;
+ c->core = (read_c0_ebase() >> 1) & 0x1ff;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 2bd561bc05a..c611e2df776 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -127,28 +127,6 @@ _sys_clone(nabi_no_regargs struct pt_regs regs)
parent_tidptr, child_tidptr);
}
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
-{
- int error;
- struct filename *filename;
-
- filename = getname((const char __user *) (long)regs.regs[4]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename->name,
- (const char __user *const __user *) (long)regs.regs[5],
- (const char __user *const __user *) (long)regs.regs[6],
- &regs);
- putname(filename);
-
-out:
- return error;
-}
-
SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
struct thread_info *ti = task_thread_info(current);
@@ -313,34 +291,3 @@ asmlinkage void bad_stack(void)
{
do_exit(SIGSEGV);
}
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
-{
- register unsigned long __a0 asm("$4") = (unsigned long) filename;
- register unsigned long __a1 asm("$5") = (unsigned long) argv;
- register unsigned long __a2 asm("$6") = (unsigned long) envp;
- register unsigned long __a3 asm("$7");
- unsigned long __v0;
-
- __asm__ volatile (" \n"
- " .set noreorder \n"
- " li $2, %5 # __NR_execve \n"
- " syscall \n"
- " move %0, $2 \n"
- " .set reorder \n"
- : "=&r" (__v0), "=r" (__a3)
- : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
- "memory");
-
- if (__a3 == 0)
- return __v0;
-
- return -__v0;
-}
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index c4a82e841c7..eeddc58802e 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,8 +2,9 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o delay.o memcpy.o memset.o \
- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+ mips-atomic.o strlen_user.o strncpy_user.o \
+ strnlen_user.o uncached.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644
index 00000000000..239a9c957b0
--- /dev/null
+++ b/arch/mips/lib/bitops.c
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory. This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value. This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value. This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 5995969e8c4..dc81ca8dc0d 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -15,13 +15,17 @@
#include <asm/compiler.h>
#include <asm/war.h>
-inline void __delay(unsigned int loops)
+void __delay(unsigned long loops)
{
__asm__ __volatile__ (
" .set noreorder \n"
" .align 3 \n"
"1: bnez %0, 1b \n"
+#if __SIZEOF_LONG__ == 4
" subu %0, 1 \n"
+#else
+ " dsubu %0, 1 \n"
+#endif
" .set reorder \n"
: "=r" (loops)
: "0" (loops));
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 3f69725556a..a99c1d3fc56 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -50,8 +50,9 @@ static void dump_tlb(int first, int last)
{
unsigned long s_entryhi, entryhi, asid;
unsigned long long entrylo0, entrylo1;
- unsigned int s_index, pagemask, c0, c1, i;
+ unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
+ s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
asid = s_entryhi & 0xff;
@@ -103,6 +104,7 @@ static void dump_tlb(int first, int last)
write_c0_entryhi(s_entryhi);
write_c0_index(s_index);
+ write_c0_pagemask(s_pagemask);
}
void dump_tlb_all(void)
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 00000000000..cd160be3ce4
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+ " .macro arch_local_irq_disable\n"
+ " .set push \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 \n"
+ " ori $1, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace void arch_local_irq_disable(void)
+{
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_disable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+ " .macro arch_local_irq_save result \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\result, $2, 1 \n"
+ " ori $1, \\result, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+ " andi \\result, \\result, 0x400 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 \\result, $12 \n"
+ " ori $1, \\result, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ preempt_disable();
+ asm volatile("arch_local_irq_save\t%0"
+ : "=r" (flags)
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+ " .macro arch_local_irq_restore flags \n"
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ "mfc0 $1, $2, 1 \n"
+ "andi \\flags, 0x400 \n"
+ "ori $1, 0x400 \n"
+ "xori $1, 0x400 \n"
+ "or \\flags, $1 \n"
+ "mtc0 \\flags, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+ /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1, $12 \n"
+ " andi \\flags, 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " or \\flags, $1 \n"
+ " mtc0 \\flags, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of branch and call overhead on each
+ * local_irq_restore()
+ */
+ if (unlikely(!(flags & 0x0400)))
+ smtc_ipi_replay();
+#endif
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 87b9cfcc30f..4b9b935a070 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -320,6 +320,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
tlb_write_random();
else
tlb_write_indexed();
+ tlbw_use_hazard();
write_c0_pagemask(PM_DEFAULT_MASK);
} else
#endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 658a520364c..2833dcb67b5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -148,8 +148,8 @@ enum label_id {
label_leave,
label_vmalloc,
label_vmalloc_done,
- label_tlbw_hazard,
- label_split,
+ label_tlbw_hazard_0,
+ label_split = label_tlbw_hazard_0 + 8,
label_tlbl_goaround1,
label_tlbl_goaround2,
label_nopage_tlbl,
@@ -167,7 +167,7 @@ UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
+/* _tlbw_hazard_x is handled differently. */
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
@@ -181,6 +181,30 @@ UASM_L_LA(_large_segbits_fault)
UASM_L_LA(_tlb_huge_update)
#endif
+static int __cpuinitdata hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+ switch (instance) {
+ case 0 ... 7:
+ uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+ return;
+ default:
+ BUG();
+ }
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+ switch (instance) {
+ case 0 ... 7:
+ uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+ break;
+ default:
+ BUG();
+ }
+}
+
/*
* For debug purposes.
*/
@@ -478,21 +502,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
* This branch uses up a mtc0 hazard nop slot and saves
* two nops after the tlbw instruction.
*/
- uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+ uasm_bgezl_hazard(p, r, hazard_instance);
tlbw(p);
- uasm_l_tlbw_hazard(l, *p);
+ uasm_bgezl_label(l, p, hazard_instance);
+ hazard_instance++;
uasm_i_nop(p);
break;
case CPU_R4600:
case CPU_R4700:
- case CPU_R5000:
- case CPU_R5000A:
uasm_i_nop(p);
tlbw(p);
uasm_i_nop(p);
break;
+ case CPU_R5000:
+ case CPU_R5000A:
+ case CPU_NEVADA:
+ uasm_i_nop(p); /* QED specifies 2 nops hazard */
+ uasm_i_nop(p); /* QED specifies 2 nops hazard */
+ tlbw(p);
+ break;
+
case CPU_R4300:
case CPU_5KC:
case CPU_TX49XX:
@@ -526,17 +557,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
- case CPU_NEVADA:
- uasm_i_nop(p); /* QED specifies 2 nops hazard */
- /*
- * This branch uses up a mtc0 hazard nop slot and saves
- * a nop after the tlbw instruction.
- */
- uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
- tlbw(p);
- uasm_l_tlbw_hazard(l, *p);
- break;
-
case CPU_RM7000:
uasm_i_nop(p);
uasm_i_nop(p);
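
build_tlb_write_entry() is emitted for several handlers (TLB refill, load,
store, modify), and a uasm label may only be defined once per generated
program; reusing the single label_tlbw_hazard across invocations is what
the numbered instances fix. The pattern from the diff above, annotated:

    /* one unique label id per emitted bgezl hazard workaround */
    uasm_bgezl_hazard(p, r, hazard_instance);   /* branch-likely forward,
                                                   tlbw in its delay slot */
    tlbw(p);
    uasm_bgezl_label(l, p, hazard_instance);    /* resolve that one label */
    hazard_instance++;                          /* next call, next id     */
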
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 80562b81f0f..74732177851 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -29,6 +29,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
#include <mtd/mtd-abi.h>
#define SMC_PORT(base, int) \
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
SMC_PORT(0x2F8, 3),
{
.mapbase = 0x1f000900, /* The CBUS UART */
- .irq = MIPS_CPU_IRQ_BASE + 2,
+ .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
.uartclk = 3686400, /* Twice the usual clk! */
.iotype = UPIO_MEM32,
.flags = CBUS_UART_FLAGS,