Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/atomic.h                    |   2
-rw-r--r--  arch/mips/include/asm/barrier.h                   |   2
-rw-r--r--  arch/mips/include/asm/cmpxchg.h                   | 124
-rw-r--r--  arch/mips/include/asm/dma-mapping.h               |  18
-rw-r--r--  arch/mips/include/asm/dma.h                       |   1
-rw-r--r--  arch/mips/include/asm/exec.h                      |  17
-rw-r--r--  arch/mips/include/asm/highmem.h                   |   2
-rw-r--r--  arch/mips/include/asm/irq.h                       |   5
-rw-r--r--  arch/mips/include/asm/jump_label.h                |   2
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000_dma.h    |   1
-rw-r--r--  arch/mips/include/asm/mach-au1x00/gpio-au1300.h   |  20
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/bcm47xx.h      |   3
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/nvram.h        |   2
-rw-r--r--  arch/mips/include/asm/mman.h                      |   4
-rw-r--r--  arch/mips/include/asm/page.h                      |   3
-rw-r--r--  arch/mips/include/asm/pci.h                       |   9
-rw-r--r--  arch/mips/include/asm/posix_types.h               | 121
-rw-r--r--  arch/mips/include/asm/processor.h                 |   7
-rw-r--r--  arch/mips/include/asm/setup.h                     |  11
-rw-r--r--  arch/mips/include/asm/socket.h                    |   4
-rw-r--r--  arch/mips/include/asm/switch_to.h                 |  85
-rw-r--r--  arch/mips/include/asm/system.h                    | 235
-rw-r--r--  arch/mips/include/asm/txx9/jmr3927.h              |   1
23 files changed, 300 insertions(+), 379 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 1d93f81d57e..3f4c5cb6433 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -18,8 +18,8 @@
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
+#include <asm/cmpxchg.h>
#include <asm/war.h>
-#include <asm/system.h>
#define ATOMIC_INIT(i) { (i) }
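Editor's note: nothing changes for users of the atomic API; the header now pulls in <asm/cmpxchg.h> directly instead of the whole of <asm/system.h>. A minimal, hypothetical sketch (illustrative names only) of a caller that still resolves through the relocated cmpxchg() definitions:

	#include <asm/atomic.h>		/* now includes <asm/cmpxchg.h> itself */

	static atomic_t example_flag = ATOMIC_INIT(0);	/* hypothetical variable */

	static void example_try_claim(void)
	{
		/* atomic_cmpxchg() expands to cmpxchg() from <asm/cmpxchg.h>. */
		atomic_cmpxchg(&example_flag, 0, 1);
	}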
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index c0884f02d3a..f7fdc24e972 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -8,6 +8,8 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
+#include <asm/addrspace.h>
+
/*
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index d8d1c2805ac..285a41fa0b1 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -9,6 +9,130 @@
#define __ASM_CMPXCHG_H
#include <linux/irqflags.h>
+#include <asm/war.h>
+
+static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
+{
+ __u32 retval;
+
+ smp_mb__before_llsc();
+
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %0, %3 # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %2, %z4 \n"
+ " .set mips3 \n"
+ " sc %2, %1 \n"
+ " beqzl %2, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } else if (kernel_uses_llsc) {
+ unsigned long dummy;
+
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %0, %3 # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %2, %z4 \n"
+ " .set mips3 \n"
+ " sc %2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } while (unlikely(!dummy));
+ } else {
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ retval = *m;
+ *m = val;
+ raw_local_irq_restore(flags); /* implies memory barrier */
+ }
+
+ smp_llsc_mb();
+
+ return retval;
+}
+
+#ifdef CONFIG_64BIT
+static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
+{
+ __u64 retval;
+
+ smp_mb__before_llsc();
+
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %0, %3 # xchg_u64 \n"
+ " move %2, %z4 \n"
+ " scd %2, %1 \n"
+ " beqzl %2, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } else if (kernel_uses_llsc) {
+ unsigned long dummy;
+
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %0, %3 # xchg_u64 \n"
+ " move %2, %z4 \n"
+ " scd %2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } while (unlikely(!dummy));
+ } else {
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ retval = *m;
+ *m = val;
+ raw_local_irq_restore(flags); /* implies memory barrier */
+ }
+
+ smp_llsc_mb();
+
+ return retval;
+}
+#else
+extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
+#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
+#endif
+
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32(ptr, x);
+ case 8:
+ return __xchg_u64(ptr, x);
+ }
+
+ return x;
+}
+
+#define xchg(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
+ \
+ ((__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+})
#define __HAVE_ARCH_CMPXCHG 1
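Editor's note: the xchg() machinery above moves here verbatim from the deleted <asm/system.h>; its calling convention is unchanged. A hedged sketch (hypothetical names) of a typical user:

	#include <asm/cmpxchg.h>	/* xchg() now lives here */

	static volatile int example_state;	/* hypothetical 32-bit variable */

	static int example_set_state(int new_state)
	{
		/* Atomically store new_state and return the previous value;
		 * only 4- and 8-byte objects are accepted, per the
		 * BUILD_BUG_ON() in the xchg() macro. */
		return xchg(&example_state, new_state);
	}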
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 7aa37ddfca4..be39a12901c 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -57,25 +57,31 @@ dma_set_mask(struct device *dev, u64 mask)
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
- ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+ ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
- ops->free_coherent(dev, size, vaddr, dma_handle);
+ ops->free(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
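Editor's note: existing drivers keep calling the old names, which are now thin wrappers over the attrs variants. A hedged driver-side sketch (device pointer and size are placeholders) of how such a call now reaches the new ops->alloc hook:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* Hypothetical helper: dev is assumed to be a bound struct device. */
	static void *example_alloc_desc_ring(struct device *dev, dma_addr_t *handle)
	{
		/* Expands to dma_alloc_attrs(dev, 4096, handle, GFP_KERNEL, NULL),
		 * which calls ops->alloc() rather than the removed
		 * ops->alloc_coherent(). */
		return dma_alloc_coherent(dev, 4096, handle, GFP_KERNEL);
	}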
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
index 2d47da62d5a..f5097f65a8a 100644
--- a/arch/mips/include/asm/dma.h
+++ b/arch/mips/include/asm/dma.h
@@ -15,7 +15,6 @@
#include <asm/io.h> /* need byte IO */
#include <linux/spinlock.h> /* And spinlocks */
#include <linux/delay.h>
-#include <asm/system.h>
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
new file mode 100644
index 00000000000..c1f6afa4bc4
--- /dev/null
+++ b/arch/mips/include/asm/exec.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 77e644082a3..2d91888c9b7 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -47,7 +47,7 @@ extern void kunmap_high(struct page *page);
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);
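Editor's note: the rename tracks the generic one-argument kmap_atomic() interface. A hedged sketch of a typical caller (hypothetical helper) under that API:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: clear a (possibly highmem) page through a
	 * temporary atomic mapping; note there is no KM_* slot argument. */
	static void example_clear_page(struct page *page)
	{
		void *vaddr = kmap_atomic(page);

		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr);
	}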
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 2354c870a63..fb698dc09bc 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -11,15 +11,12 @@
#include <linux/linkage.h>
#include <linux/smp.h>
+#include <linux/irqdomain.h>
#include <asm/mipsmtregs.h>
#include <irq.h>
-static inline void irq_dispose_mapping(unsigned int virq)
-{
-}
-
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 1881b316ca4..4d6d77ed9b9 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,7 +20,7 @@
#define WORD_INSN ".word"
#endif
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\tnop\n\t"
"nop\n\t"
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
index 59f5b55b220..ba4cf0e91c8 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
@@ -33,7 +33,6 @@
#include <linux/io.h> /* need byte IO */
#include <linux/spinlock.h> /* And spinlocks */
#include <linux/delay.h>
-#include <asm/system.h>
#define NUM_AU1000_DMA_CHANNELS 8
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
index 556e1be20bf..fb9975c74c5 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -11,6 +11,9 @@
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
+struct gpio;
+struct gpio_chip;
+
/* with the current GPIC design, up to 128 GPIOs are possible.
* The only implementation so far is in the Au1300, which has 75 externally
* available GPIOs.
@@ -203,7 +206,22 @@ static inline int gpio_request(unsigned int gpio, const char *label)
return 0;
}
-static inline void gpio_free(unsigned int gpio)
+static inline int gpio_request_one(unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ return 0;
+}
+
+static inline int gpio_request_array(struct gpio *array, size_t num)
+{
+ return 0;
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+}
+
+static inline void gpio_free_array(struct gpio *array, size_t num)
{
}
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
index de95e0723e2..5ecaf47b34d 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -44,4 +44,7 @@ union bcm47xx_bus {
extern union bcm47xx_bus bcm47xx_bus;
extern enum bcm47xx_bus_type bcm47xx_bus_type;
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix);
+void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix);
+
#endif /* __ASM_BCM47XX_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h
index 184d5ecb5f5..69ef3efe06e 100644
--- a/arch/mips/include/asm/mach-bcm47xx/nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/nvram.h
@@ -37,7 +37,7 @@ struct nvram_header {
extern int nvram_getenv(char *name, char *val, size_t val_len);
-static inline void nvram_parse_macaddr(char *buf, u8 *macaddr)
+static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6])
{
if (strchr(buf, ':'))
sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h
index 785b4ea4ec3..46d3da0d4b9 100644
--- a/arch/mips/include/asm/mman.h
+++ b/arch/mips/include/asm/mman.h
@@ -80,6 +80,10 @@
#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
+ overrides the coredump filter bits */
+#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+
/* compatibility flags */
#define MAP_FILE 0
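Editor's note: userspace reaches these new values through madvise(). A hedged illustration (hypothetical helper; assumes libc headers expose the constants):

	#include <stddef.h>
	#include <sys/mman.h>

	/* Hypothetical userspace helper: keep a sensitive buffer out of core dumps. */
	static int example_exclude_from_coredump(void *buf, size_t len)
	{
		return madvise(buf, len, MADV_DONTDUMP);	/* undo with MADV_DODUMP */
	}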
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index d41790928c6..da9bd7d270d 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -39,9 +39,6 @@
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_HUGETLB_PAGE */
-# ifndef BUILD_BUG
-# define BUILD_BUG() do { extern void __build_bug(void); __build_bug(); } while (0)
-# endif
#define HPAGE_SHIFT ({BUILD_BUG(); 0; })
#define HPAGE_SIZE ({BUILD_BUG(); 0; })
#define HPAGE_MASK ({BUILD_BUG(); 0; })
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 576397c6992..fcd4060f642 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -92,6 +92,7 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
+#include <asm-generic/pci-bridge.h>
struct pci_dev;
@@ -112,12 +113,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif
-extern void pcibios_resource_to_bus(struct pci_dev *dev,
- struct pci_bus_region *region, struct resource *res);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region);
-
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus)
@@ -145,8 +140,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
#define arch_setup_msi_irqs arch_setup_msi_irqs
#endif
-extern int pci_probe_only;
-
extern char * (*pcibios_plat_setup)(char *str);
#endif /* _ASM_PCI_H */
diff --git a/arch/mips/include/asm/posix_types.h b/arch/mips/include/asm/posix_types.h
index c200102c858..e0308dcca13 100644
--- a/arch/mips/include/asm/posix_types.h
+++ b/arch/mips/include/asm/posix_types.h
@@ -17,128 +17,21 @@
* assume GCC is being used.
*/
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-#if (_MIPS_SZLONG == 32)
-typedef unsigned long __kernel_nlink_t;
-#endif
#if (_MIPS_SZLONG == 64)
typedef unsigned int __kernel_nlink_t;
+#define __kernel_nlink_t __kernel_nlink_t
#endif
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-#if (_MIPS_SZLONG == 32)
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-#endif
-#if (_MIPS_SZLONG == 64)
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#endif
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef long __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-typedef unsigned int __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
+typedef long __kernel_daddr_t;
+#define __kernel_daddr_t __kernel_daddr_t
-typedef struct {
#if (_MIPS_SZLONG == 32)
+typedef struct {
long val[2];
-#endif
-#if (_MIPS_SZLONG == 64)
- int val[2];
-#endif
} __kernel_fsid_t;
+#define __kernel_fsid_t __kernel_fsid_t
+#endif
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
#endif /* _ASM_POSIX_TYPES_H */
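Editor's note: the rewrite relies on the asm-generic override convention, where defining a macro with the same name as a typedef tells <asm-generic/posix_types.h> to skip its default for that type. A paraphrased (not verbatim) sketch of that guard pattern in the generic header:

	/* Paraphrased sketch of the guard used by <asm-generic/posix_types.h>: */
	#ifndef __kernel_daddr_t
	typedef int __kernel_daddr_t;		/* generic default, skipped on MIPS */
	#endif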
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index c104f1039a6..20e9dcf42b2 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -19,7 +19,6 @@
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
-#include <asm/system.h>
/*
* Return current * instruction pointer ("program counter").
@@ -356,6 +355,12 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
+/*
+ * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
+ * systems.
+ */
+#define __ARCH_WANT_UNLOCKED_CTXSW
+
#endif
#endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 50511aac04e..6dce6d8d09a 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -5,6 +5,17 @@
#ifdef __KERNEL__
extern void setup_early_printk(void);
+
+extern void set_handler(unsigned long offset, void *addr, unsigned long len);
+extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
+
+typedef void (*vi_handler_t)(void);
+extern void *set_vi_handler(int n, vi_handler_t addr);
+
+extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
+extern void per_cpu_trap_init(void);
+
#endif /* __KERNEL__ */
#endif /* __SETUP_H */
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h
index ad5c0a7a02a..a2ed6fdad4e 100644
--- a/arch/mips/include/asm/socket.h
+++ b/arch/mips/include/asm/socket.h
@@ -84,6 +84,10 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
#define SO_WIFI_STATUS 41
#define SCM_WIFI_STATUS SO_WIFI_STATUS
+#define SO_PEEK_OFF 42
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS 43
#ifdef __KERNEL__
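Editor's note: on the userspace side the new socket option is driven through setsockopt(). A hedged sketch (hypothetical helper; assumes a datagram or stream socket fd):

	#include <sys/socket.h>

	/* Hypothetical userspace helper: enable a sticky MSG_PEEK offset. */
	static int example_enable_peek_off(int fd)
	{
		int off = 0;	/* start peeking at the head of the receive queue */

		return setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
	}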
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
new file mode 100644
index 00000000000..5d33621b565
--- /dev/null
+++ b/arch/mips/include/asm/switch_to.h
@@ -0,0 +1,85 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/cpu-features.h>
+#include <asm/watch.h>
+#include <asm/dsp.h>
+
+struct task_struct;
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern asmlinkage void *resume(void *last, void *next, void *next_ti);
+
+extern unsigned int ll_bit;
+extern struct task_struct *ll_task;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management. We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define __mips_mt_fpaff_switch_to(prev) \
+do { \
+ struct thread_info *__prev_ti = task_thread_info(prev); \
+ \
+ if (cpu_has_fpu && \
+ test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
+ (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+ clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
+ prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+ } \
+ next->thread.emulated_fp = 0; \
+} while(0)
+
+#else
+#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
+#endif
+
+#define __clear_software_ll_bit() \
+do { \
+ if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
+ ll_bit = 0; \
+} while (0)
+
+#define switch_to(prev, next, last) \
+do { \
+ __mips_mt_fpaff_switch_to(prev); \
+ if (cpu_has_dsp) \
+ __save_dsp(prev); \
+ __clear_software_ll_bit(); \
+ (last) = resume(prev, next, task_thread_info(next)); \
+} while (0)
+
+#define finish_arch_switch(prev) \
+do { \
+ if (cpu_has_dsp) \
+ __restore_dsp(current); \
+ if (cpu_has_userlocal) \
+ write_c0_userlocal(current_thread_info()->tp_value); \
+ __restore_watch(); \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
deleted file mode 100644
index 6018c80ce37..00000000000
--- a/arch/mips/include/asm/system.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
- * Copyright (C) 1996 by Paul M. Antoine
- * Copyright (C) 1999 Silicon Graphics
- * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.
- */
-#ifndef _ASM_SYSTEM_H
-#define _ASM_SYSTEM_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/irqflags.h>
-
-#include <asm/addrspace.h>
-#include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-#include <asm/cpu-features.h>
-#include <asm/dsp.h>
-#include <asm/watch.h>
-#include <asm/war.h>
-
-
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- */
-extern asmlinkage void *resume(void *last, void *next, void *next_ti);
-
-struct task_struct;
-
-extern unsigned int ll_bit;
-extern struct task_struct *ll_task;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-/*
- * Handle the scheduler resume end of FPU affinity management. We do this
- * inline to try to keep the overhead down. If we have been forced to run on
- * a "CPU" with an FPU because of a previous high level of FP computation,
- * but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
- *
- * We're not calling set_cpus_allowed() here, because we have no need to
- * force prompt migration - we're already switching the current CPU to a
- * different thread.
- */
-
-#define __mips_mt_fpaff_switch_to(prev) \
-do { \
- struct thread_info *__prev_ti = task_thread_info(prev); \
- \
- if (cpu_has_fpu && \
- test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
- (!(KSTK_STATUS(prev) & ST0_CU1))) { \
- clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
- prev->cpus_allowed = prev->thread.user_cpus_allowed; \
- } \
- next->thread.emulated_fp = 0; \
-} while(0)
-
-#else
-#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
-#endif
-
-#define __clear_software_ll_bit() \
-do { \
- if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
- ll_bit = 0; \
-} while (0)
-
-#define switch_to(prev, next, last) \
-do { \
- __mips_mt_fpaff_switch_to(prev); \
- if (cpu_has_dsp) \
- __save_dsp(prev); \
- __clear_software_ll_bit(); \
- (last) = resume(prev, next, task_thread_info(next)); \
-} while (0)
-
-#define finish_arch_switch(prev) \
-do { \
- if (cpu_has_dsp) \
- __restore_dsp(current); \
- if (cpu_has_userlocal) \
- write_c0_userlocal(current_thread_info()->tp_value); \
- __restore_watch(); \
-} while (0)
-
-static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
-{
- __u32 retval;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long dummy;
-
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %3 # xchg_u32 \n"
- " .set mips0 \n"
- " move %2, %z4 \n"
- " .set mips3 \n"
- " sc %2, %1 \n"
- " beqzl %2, 1b \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
- } else if (kernel_uses_llsc) {
- unsigned long dummy;
-
- do {
- __asm__ __volatile__(
- " .set mips3 \n"
- " ll %0, %3 # xchg_u32 \n"
- " .set mips0 \n"
- " move %2, %z4 \n"
- " .set mips3 \n"
- " sc %2, %1 \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
- } while (unlikely(!dummy));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- retval = *m;
- *m = val;
- raw_local_irq_restore(flags); /* implies memory barrier */
- }
-
- smp_llsc_mb();
-
- return retval;
-}
-
-#ifdef CONFIG_64BIT
-static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
-{
- __u64 retval;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long dummy;
-
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %3 # xchg_u64 \n"
- " move %2, %z4 \n"
- " scd %2, %1 \n"
- " beqzl %2, 1b \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
- } else if (kernel_uses_llsc) {
- unsigned long dummy;
-
- do {
- __asm__ __volatile__(
- " .set mips3 \n"
- " lld %0, %3 # xchg_u64 \n"
- " move %2, %z4 \n"
- " scd %2, %1 \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
- } while (unlikely(!dummy));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- retval = *m;
- *m = val;
- raw_local_irq_restore(flags); /* implies memory barrier */
- }
-
- smp_llsc_mb();
-
- return retval;
-}
-#else
-extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
-#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
-#endif
-
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
- }
-
- return x;
-}
-
-#define xchg(ptr, x) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
- \
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
-})
-
-extern void set_handler(unsigned long offset, void *addr, unsigned long len);
-extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
-
-typedef void (*vi_handler_t)(void);
-extern void *set_vi_handler(int n, vi_handler_t addr);
-
-extern void *set_except_vector(int n, void *addr);
-extern unsigned long ebase;
-extern void per_cpu_trap_init(void);
-
-/*
- * See include/asm-ia64/system.h; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-#endif /* _ASM_SYSTEM_H */
diff --git a/arch/mips/include/asm/txx9/jmr3927.h b/arch/mips/include/asm/txx9/jmr3927.h
index a409c446bf1..8808d7f82da 100644
--- a/arch/mips/include/asm/txx9/jmr3927.h
+++ b/arch/mips/include/asm/txx9/jmr3927.h
@@ -12,7 +12,6 @@
#include <asm/txx9/tx3927.h>
#include <asm/addrspace.h>
-#include <asm/system.h>
#include <asm/txx9irq.h>
/* CS */