Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--   include/asm-sparc64/auxvec.h           |   4
-rw-r--r--   include/asm-sparc64/cacheflush.h       |   7
-rw-r--r--   include/asm-sparc64/compat.h           |  18
-rw-r--r--   include/asm-sparc64/cpudata.h          |   4
-rw-r--r--   include/asm-sparc64/fcntl.h            |  46
-rw-r--r--   include/asm-sparc64/futex.h            |  53
-rw-r--r--   include/asm-sparc64/hardirq.h          |  16
-rw-r--r--   include/asm-sparc64/hdreg.h            |   1
-rw-r--r--   include/asm-sparc64/ide.h              |   1
-rw-r--r--   include/asm-sparc64/io.h               |  47
-rw-r--r--   include/asm-sparc64/page.h             |  23
-rw-r--r--   include/asm-sparc64/pci.h              |   2
-rw-r--r--   include/asm-sparc64/pgalloc.h          |   1
-rw-r--r--   include/asm-sparc64/pgtable.h          |  23
-rw-r--r--   include/asm-sparc64/spinlock.h         | 160
-rw-r--r--   include/asm-sparc64/spinlock_types.h   |  20
-rw-r--r--   include/asm-sparc64/system.h           |  49
-rw-r--r--   include/asm-sparc64/types.h            |   2
-rw-r--r--   include/asm-sparc64/uaccess.h          |   6
19 files changed, 219 insertions, 264 deletions
diff --git a/include/asm-sparc64/auxvec.h b/include/asm-sparc64/auxvec.h
new file mode 100644
index 00000000000..436a2912982
--- /dev/null
+++ b/include/asm-sparc64/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __ASM_SPARC64_AUXVEC_H
+#define __ASM_SPARC64_AUXVEC_H
+
+#endif /* !(__ASM_SPARC64_AUXVEC_H) */
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index 51b26e81d82..ededd2659ea 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -4,13 +4,6 @@
 #include <linux/config.h>
 #include <asm/page.h>
 
-/* Flushing for D-cache alias handling is only needed if
- * the page size is smaller than 16K.
- */
-#if PAGE_SHIFT < 14
-#define DCACHE_ALIASING_POSSIBLE
-#endif
-
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
diff --git a/include/asm-sparc64/compat.h b/include/asm-sparc64/compat.h
index b59122dd176..c73935dc7ba 100644
--- a/include/asm-sparc64/compat.h
+++ b/include/asm-sparc64/compat.h
@@ -12,8 +12,10 @@ typedef s32 compat_ssize_t;
 typedef s32 compat_time_t;
 typedef s32 compat_clock_t;
 typedef s32 compat_pid_t;
-typedef u16 compat_uid_t;
-typedef u16 compat_gid_t;
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
 typedef u16 compat_mode_t;
 typedef u32 compat_ino_t;
 typedef u16 compat_dev_t;
@@ -47,8 +49,8 @@ struct compat_stat {
 	compat_ino_t	st_ino;
 	compat_mode_t	st_mode;
 	compat_nlink_t	st_nlink;
-	compat_uid_t	st_uid;
-	compat_gid_t	st_gid;
+	__compat_uid_t	st_uid;
+	__compat_gid_t	st_gid;
 	compat_dev_t	st_rdev;
 	compat_off_t	st_size;
 	compat_time_t	st_atime;
@@ -177,10 +179,10 @@ static __inline__ void __user *compat_alloc_user_space(long len)
 
 struct compat_ipc64_perm {
 	compat_key_t key;
-	__kernel_uid_t uid;
-	__kernel_gid_t gid;
-	__kernel_uid_t cuid;
-	__kernel_gid_t cgid;
+	__compat_uid32_t uid;
+	__compat_gid32_t gid;
+	__compat_uid32_t cuid;
+	__compat_gid32_t cgid;
 	unsigned short __pad1;
 	compat_mode_t mode;
 	unsigned short __pad2;
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index cc7198aaac5..9a3a81f1cc5 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -1,6 +1,6 @@
 /* cpudata.h: Per-cpu parameters.
  *
- * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com)
  */
 
 #ifndef _SPARC64_CPUDATA_H
@@ -10,7 +10,7 @@
 
 typedef struct {
 	/* Dcache line 1 */
-	unsigned int	__pad0;		/* bh_count moved to irq_stat for consistency. KAO */
+	unsigned int	__softirq_pending; /* must be 1st, see rtrap.S */
 	unsigned int	multiplier;
 	unsigned int	counter;
 	unsigned int	idle_volume;
diff --git a/include/asm-sparc64/fcntl.h b/include/asm-sparc64/fcntl.h
index e36def0d0d8..b2aecf0054b 100644
--- a/include/asm-sparc64/fcntl.h
+++ b/include/asm-sparc64/fcntl.h
@@ -4,10 +4,6 @@
 /* open/fcntl - O_SYNC is only implemented on blocks devices and on files
    located on an ext2 file system */
-#define O_RDONLY	0x0000
-#define O_WRONLY	0x0001
-#define O_RDWR		0x0002
-#define O_ACCMODE	0x0003
 #define O_NDELAY	0x0004
 #define O_APPEND	0x0008
 #define FASYNC		0x0040	/* fcntl, for BSD compatibility */
@@ -17,62 +13,24 @@
 #define O_SYNC		0x2000
 #define O_NONBLOCK	0x4000
 #define O_NOCTTY	0x8000	/* not fcntl */
-#define O_DIRECTORY	0x10000	/* must be a directory */
-#define O_NOFOLLOW	0x20000	/* don't follow links */
 #define O_LARGEFILE	0x40000
 #define O_DIRECT	0x100000 /* direct disk access hint */
 #define O_NOATIME	0x200000
 
-#define F_DUPFD		0	/* dup */
-#define F_GETFD		1	/* get close_on_exec */
-#define F_SETFD		2	/* set/clear close_on_exec */
-#define F_GETFL		3	/* get file->f_flags */
-#define F_SETFL		4	/* set file->f_flags */
 #define F_GETOWN	5	/* for sockets. */
 #define F_SETOWN	6	/* for sockets. */
 #define F_GETLK		7
 #define F_SETLK		8
 #define F_SETLKW	9
-#define F_SETSIG	10	/* for sockets. */
-#define F_GETSIG	11	/* for sockets. */
-
-/* for F_[GET|SET]FL */
-#define FD_CLOEXEC	1	/* actually anything with low bit set goes */
 
 /* for posix fcntl() and lockf() */
 #define F_RDLCK		1
 #define F_WRLCK		2
 #define F_UNLCK		3
 
-/* for old implementation of bsd flock () */
-#define F_EXLCK		4	/* or 3 */
-#define F_SHLCK		8	/* or 4 */
-
-/* for leases */
-#define F_INPROGRESS	16
-
-/* operations for bsd flock(), also used by the kernel implementation */
-#define LOCK_SH		1	/* shared lock */
-#define LOCK_EX		2	/* exclusive lock */
-#define LOCK_NB		4	/* or'd with one of the above to prevent
-				   blocking */
-#define LOCK_UN		8	/* remove lock */
-
-#define LOCK_MAND	32	/* This is a mandatory flock */
-#define LOCK_READ	64	/* ... Which allows concurrent read operations */
-#define LOCK_WRITE	128	/* ... Which allows concurrent write operations */
-#define LOCK_RW		192	/* ... Which allows concurrent read & write ops */
-
-struct flock {
-	short l_type;
-	short l_whence;
-	off_t l_start;
-	off_t l_len;
-	pid_t l_pid;
-	short __unused;
-};
+#define __ARCH_FLOCK_PAD	short __unused;
 
-#define F_LINUX_SPECIFIC_BASE	1024
+#include <asm-generic/fcntl.h>
 
 #endif /* !(_SPARC64_FCNTL_H) */
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
new file mode 100644
index 00000000000..9feff4ce142
--- /dev/null
+++ b/include/asm-sparc64/futex.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+	case FUTEX_OP_ADD:
+	case FUTEX_OP_OR:
+	case FUTEX_OP_ANDN:
+	case FUTEX_OP_XOR:
+	default:
+		ret = -ENOSYS;
+	}
+
+	dec_preempt_count();
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+#endif
+#endif
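The new futex_atomic_op_inuser() above unpacks the packed encoded_op argument with shift-and-mask arithmetic before dispatching (here every op still falls through to -ENOSYS). A standalone worked example of that decoding; the two FUTEX_OP_* constants are copied in purely for illustration (in the kernel they come from <linux/futex.h>), and the casts are only there to keep the sketch free of signed-overflow issues that the kernel's in-place expressions tolerate:

/*
 * Layout of encoded_op (32 bits):
 *   [31:28] op   (bit 31 doubles as the "oparg is a shift count" flag)
 *   [27:24] cmp
 *   [23:12] oparg  (signed 12-bit field)
 *   [11:0]  cmparg (signed 12-bit field)
 */
#include <stdio.h>

#define FUTEX_OP_ADD	1	/* *(int *)uaddr += oparg */
#define FUTEX_OP_CMP_GT	4	/* result: oldval > cmparg */

int main(void)
{
	/* "add 1, then report whether the old value was > 0" */
	int encoded_op = (FUTEX_OP_ADD << 28) | (FUTEX_OP_CMP_GT << 24) |
			 (1 << 12) | 0;

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (int)((unsigned)encoded_op << 8) >> 20;	/* sign-extends */
	int cmparg = (int)((unsigned)encoded_op << 20) >> 20;	/* sign-extends */

	/* Prints: op=1 cmp=4 oparg=1 cmparg=0 */
	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}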
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index d6db1aed764..f0cf71376ec 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -1,22 +1,16 @@
 /* hardirq.h: 64-bit Sparc hard IRQ support.
  *
- * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1998, 2005 David S. Miller (davem@davemloft.net)
  */
 
 #ifndef __SPARC64_HARDIRQ_H
 #define __SPARC64_HARDIRQ_H
 
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <linux/spinlock.h>
-#include <linux/cache.h>
+#include <asm/cpudata.h>
 
-/* rtrap.S is sensitive to the offsets of these fields */
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#define __ARCH_IRQ_STAT
+#define local_softirq_pending() \
+	(local_cpu_data().__softirq_pending)
 
 #define HARDIRQ_BITS	8
diff --git a/include/asm-sparc64/hdreg.h b/include/asm-sparc64/hdreg.h
deleted file mode 100644
index 7f7fd1af0af..00000000000
--- a/include/asm-sparc64/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/hdreg.h>
diff --git a/include/asm-sparc64/ide.h b/include/asm-sparc64/ide.h
index 4c1098474c7..c393f815b0b 100644
--- a/include/asm-sparc64/ide.h
+++ b/include/asm-sparc64/ide.h
@@ -15,6 +15,7 @@
 #include <asm/io.h>
 #include <asm/spitfire.h>
 #include <asm/cacheflush.h>
+#include <asm/page.h>
 
 #ifndef MAX_HWIFS
 # ifdef CONFIG_BLK_DEV_IDEPCI
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index afdcea90707..0056770e83a 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -100,18 +100,41 @@ static __inline__ void _outl(u32 l, unsigned long addr)
 #define inl_p(__addr)		inl(__addr)
 #define outl_p(__l, __addr)	outl(__l, __addr)
 
-extern void outsb(void __iomem *addr, const void *src, unsigned long count);
-extern void outsw(void __iomem *addr, const void *src, unsigned long count);
-extern void outsl(void __iomem *addr, const void *src, unsigned long count);
-extern void insb(void __iomem *addr, void *dst, unsigned long count);
-extern void insw(void __iomem *addr, void *dst, unsigned long count);
-extern void insl(void __iomem *addr, void *dst, unsigned long count);
-#define ioread8_rep(a,d,c)	insb(a,d,c)
-#define ioread16_rep(a,d,c)	insw(a,d,c)
-#define ioread32_rep(a,d,c)	insl(a,d,c)
-#define iowrite8_rep(a,s,c)	outsb(a,s,c)
-#define iowrite16_rep(a,s,c)	outsw(a,s,c)
-#define iowrite32_rep(a,s,c)	outsl(a,s,c)
+extern void outsb(unsigned long, const void *, unsigned long);
+extern void outsw(unsigned long, const void *, unsigned long);
+extern void outsl(unsigned long, const void *, unsigned long);
+extern void insb(unsigned long, void *, unsigned long);
+extern void insw(unsigned long, void *, unsigned long);
+extern void insl(unsigned long, void *, unsigned long);
+
+static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
+{
+	insb((unsigned long __force)port, buf, count);
+}
+static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
+{
+	insw((unsigned long __force)port, buf, count);
+}
+
+static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
+{
+	insl((unsigned long __force)port, buf, count);
+}
+
+static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
+{
+	outsb((unsigned long __force)port, buf, count);
+}
+
+static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
+{
+	outsw((unsigned long __force)port, buf, count);
+}
+
+static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
+{
+	outsl((unsigned long __force)port, buf, count);
+}
 
 /* Memory functions, same as I/O accesses on Ultra. */
 static inline u8 _readb(const volatile void __iomem *addr)
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index b87dbbd64bc..7f8d764abc4 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -21,6 +21,13 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
+/* Flushing for D-cache alias handling is only needed if
+ * the page size is smaller than 16K.
+ */
+#if PAGE_SHIFT < 14
+#define DCACHE_ALIASING_POSSIBLE
+#endif
+
 #ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
@@ -150,20 +157,6 @@ struct sparc_phys_banks {
 
 extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
 
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
-	int order;
-
-	size = (size-1) >> (PAGE_SHIFT-1);
-	order = -1;
-	do {
-		size >>= 1;
-		order++;
-	} while (size);
-	return order;
-}
-
 #endif /* !(__ASSEMBLY__) */
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
@@ -171,4 +164,6 @@ static __inline__ int get_order(unsigned long size)
 
 #endif /* !(__KERNEL__) */
 
+#include <asm-generic/page.h>
+
 #endif /* !(_SPARC64_PAGE_H) */
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index a4ab0ec7143..89bd71b1c0d 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -269,6 +269,8 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev,
 			struct resource *res,
 			struct pci_bus_region *region);
 
+extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);
+
 static inline void pcibios_add_platform_entries(struct pci_dev *dev)
 {
 }
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index b9b1914aae6..a96067cca96 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -10,6 +10,7 @@
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
 #include <asm/cacheflush.h>
+#include <asm/page.h>
 
 /* Page table allocation/freeing. */
 #ifdef CONFIG_SMP
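page.h stops carrying its own copy of get_order() and instead picks the helper up through the new <asm-generic/page.h> include at the bottom of the file. The removed code is small enough to restate; a user-space sketch of the same pure power-of-two calculation, assuming sparc64's default 8K pages (PAGE_SHIFT = 13) purely for the sake of concrete numbers:

/* get_order(size) returns the log2 of the number of pages needed to hold
 * "size" bytes, rounded up to a power of two -- the same loop the removed
 * helper used.  PAGE_SHIFT = 13 is an assumption for this sketch.
 */
#include <stdio.h>

#define PAGE_SHIFT 13

static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* 1 byte -> order 0 (one page), 8K+1 bytes -> order 1 (two pages),
	 * five pages' worth -> order 3 (rounded up to eight pages).
	 * Prints: 0 1 3 */
	printf("%d %d %d\n", get_order(1), get_order(8193), get_order(5 * 8192));
	return 0;
}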
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 1ae00c5087f..a297f6144f0 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -24,21 +24,23 @@
 #include <asm/processor.h>
 #include <asm/const.h>
 
-/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 16MB).
- * The page copy blockops use 0x1000000 to 0x18000000 (16MB --> 24MB).
+/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
+ * The page copy blockops can use 0x2000000 to 0x10000000.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
- * The vmalloc area spans 0x140000000 to 0x200000000.
+ * The vmalloc area spans 0x100000000 to 0x200000000.
+ * Since modules need to be in the lowest 32-bits of the address space,
+ * we place them right before the OBP area from 0x10000000 to 0xf0000000.
  * There is a single static kernel PMD which maps from 0x0 to address
  * 0x400000000.
  */
-#define TLBTEMP_BASE		_AC(0x0000000001000000,UL)
-#define MODULES_VADDR		_AC(0x0000000002000000,UL)
-#define MODULES_LEN		_AC(0x000000007e000000,UL)
-#define MODULES_END		_AC(0x0000000080000000,UL)
-#define VMALLOC_START		_AC(0x0000000140000000,UL)
-#define VMALLOC_END		_AC(0x0000000200000000,UL)
+#define TLBTEMP_BASE		_AC(0x0000000002000000,UL)
+#define MODULES_VADDR		_AC(0x0000000010000000,UL)
+#define MODULES_LEN		_AC(0x00000000e0000000,UL)
+#define MODULES_END		_AC(0x00000000f0000000,UL)
 #define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
 #define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
+#define VMALLOC_START		_AC(0x0000000100000000,UL)
+#define VMALLOC_END		_AC(0x0000000200000000,UL)
 
 /* XXX All of this needs to be rethought so we can take advantage
  * XXX cheetah's full 64-bit virtual address space, ie. no more hole
@@ -410,9 +412,6 @@ extern unsigned long *sparc64_valid_addr_bitmap;
 #define kern_addr_valid(addr)	\
 	(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
 
-extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
-			       unsigned long offset,
-			       unsigned long size, pgprot_t prot, int space);
 extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 			      unsigned long pfn,
 			      unsigned long size, pgprot_t prot);
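The reworked layout in the comment above can be checked mechanically: modules now sit directly below the OBP window, and vmalloc begins exactly where OBP ends. A small host-side sketch with the constants copied from the hunk (suffixed ULL here so it also builds on 32-bit hosts):

#include <assert.h>
#include <stdio.h>

#define MODULES_VADDR	0x0000000010000000ULL
#define MODULES_LEN	0x00000000e0000000ULL
#define MODULES_END	0x00000000f0000000ULL
#define LOW_OBP_ADDRESS	0x00000000f0000000ULL
#define HI_OBP_ADDRESS	0x0000000100000000ULL
#define VMALLOC_START	0x0000000100000000ULL
#define VMALLOC_END	0x0000000200000000ULL

int main(void)
{
	assert(MODULES_VADDR + MODULES_LEN == MODULES_END);
	assert(MODULES_END == LOW_OBP_ADDRESS);		/* modules end where OBP begins */
	assert(HI_OBP_ADDRESS == VMALLOC_START);	/* vmalloc starts right above OBP */

	/* Prints: modules: 3584 MB, vmalloc: 4 GB */
	printf("modules: %llu MB, vmalloc: %llu GB\n",
	       MODULES_LEN >> 20,
	       (VMALLOC_END - VMALLOC_START) >> 30);
	return 0;
}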
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index a02c4370eb4..ec85d12d73b 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -29,24 +29,13 @@
  * must be pre-V9 branches.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
+#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
 
-typedef struct {
-	volatile unsigned char lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}
+#define __raw_spin_unlock_wait(lp)	\
+	do {	rmb();			\
+	} while((lp)->lock)
 
-#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(lp)	((lp)->lock != 0)
-
-#define spin_unlock_wait(lp)	\
-do {	rmb();			\
-} while((lp)->lock)
-
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 	: "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long result;
 
@@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	membar		#StoreStore | #LoadStore\n"
@@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 	: "memory");
 }
 
-static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long tmp1, tmp2;
 
@@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
 	: "memory");
 }
 
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-	volatile unsigned char lock;
-	unsigned int owner_pc, owner_cpu;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
-#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(__lock)	((__lock)->lock != 0)
-#define spin_unlock_wait(__lock)	\
-do { \
-	rmb(); \
-} while((__lock)->lock)
-
-extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
-extern void _do_spin_unlock(spinlock_t *lock);
-extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);
-
-#define _raw_spin_trylock(lp)	\
-	_do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
-#define _raw_spin_lock(lock)	\
-	_do_spin_lock(lock, "spin_lock", \
-		      (unsigned long) __builtin_return_address(0))
-#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones...
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-
-static void inline __read_lock(rwlock_t *lock)
+static void inline __read_lock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __read_unlock(rwlock_t *lock)
+static void inline __read_unlock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_lock(rwlock_t *lock)
+static void inline __write_lock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_unlock(rwlock_t *lock)
+static void inline __write_unlock(raw_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	membar		#LoadStore | #StoreStore\n"
@@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __write_trylock(rwlock_t *lock)
+static int inline __write_trylock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock)
 	return result;
 }
 
-#define _raw_read_lock(p)	__read_lock(p)
-#define _raw_read_unlock(p)	__read_unlock(p)
-#define _raw_write_lock(p)	__write_lock(p)
-#define _raw_write_unlock(p)	__write_unlock(p)
-#define _raw_write_trylock(p)	__write_trylock(p)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-	volatile unsigned long lock;
-	unsigned int writer_pc, writer_cpu;
-	unsigned int reader_pc[NR_CPUS];
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-
-extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
-extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);
-
-#define _raw_read_lock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_read_lock(lock, "read_lock", \
-		      (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_read_unlock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_read_unlock(lock, "read_unlock", \
-		      (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_lock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_write_lock(lock, "write_lock", \
-		      (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_unlock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_write_unlock(lock, \
-		      (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_trylock(lock) \
-({	unsigned long flags; \
-	int val; \
-	local_irq_save(flags); \
-	val = _do_write_trylock(lock, "write_trylock", \
-				(unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-	val; \
-})
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
-#define write_can_lock(rw)	(!(rw)->lock)
+#define __raw_read_lock(p)	__read_lock(p)
+#define __raw_read_unlock(p)	__read_unlock(p)
+#define __raw_write_lock(p)	__write_lock(p)
+#define __raw_write_unlock(p)	__write_unlock(p)
+#define __raw_write_trylock(p)	__write_trylock(p)
+
+#define __raw_read_trylock(lock)	generic__raw_read_trylock(lock)
+#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
+#define __raw_write_can_lock(rw)	(!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h
new file mode 100644
index 00000000000..e128112a0d7
--- /dev/null
+++ b/include/asm-sparc64/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __SPARC64_SPINLOCK_TYPES_H
+#define __SPARC64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned char lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
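The assembly bodies of the lock routines are untouched by this patch; only the type and the naming move to the raw_* scheme, with the lock state now held in the single byte that spinlock_types.h declares (0 = free, non-zero = held, the chip's ldstub setting it to 0xff on acquire). A user-space sketch of that discipline, using __sync_lock_test_and_set() purely as a stand-in for ldstub; this is an illustration of the semantics, not the kernel's implementation:

#include <stdio.h>

typedef struct {
	volatile unsigned char lock;	/* same shape as raw_spinlock_t above */
} demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *lp)
{
	/* Atomically read the byte and set it to 0xff; we own the lock when
	 * the value we read back was 0.  Otherwise spin read-only until it
	 * drops to 0 and retry, like the ldub loop in the real asm. */
	while (__sync_lock_test_and_set(&lp->lock, 0xff) != 0)
		while (lp->lock)
			;
}

static void demo_spin_unlock(demo_spinlock_t *lp)
{
	__sync_lock_release(&lp->lock);	/* store 0 with release semantics */
}

int main(void)
{
	demo_spinlock_t l = { 0 };

	demo_spin_lock(&l);
	printf("locked: %d\n", l.lock != 0);	/* 1, cf. __raw_spin_is_locked() */
	demo_spin_unlock(&l);
	printf("locked: %d\n", l.lock != 0);	/* 0 */
	return 0;
}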
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 5e94c05dc2f..b5417529f6f 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -28,13 +28,48 @@ enum sparc_cpu {
 #define ARCH_SUN4C_SUN4 0
 #define ARCH_SUN4 0
 
-extern void mb(void);
-extern void rmb(void);
-extern void wmb(void);
-extern void membar_storeload(void);
-extern void membar_storeload_storestore(void);
-extern void membar_storeload_loadload(void);
-extern void membar_storestore_loadstore(void);
+/* These are here in an effort to more fully work around Spitfire Errata
+ * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
+ * branch, the chip can stop executing instructions until a trap occurs.
+ * Therefore, if interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be right in the
+ * delay slot, but a case has been traced recently wherein the memory barrier
+ * was one instruction after the branch delay slot and the chip still hung.
+ * The offending sequence was the following in sym_wakeup_done() of the
+ * sym53c8xx_2 driver:
+ *
+ *	call	sym_ccb_from_dsa, 0
+ *	 movge	%icc, 0, %l0
+ *	brz,pn	%o0, .LL1303
+ *	 mov	%o0, %l2
+ *	membar	#LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.  Therefore, we put
+ * the memory barrier explicitly into a "branch always, predicted taken"
+ * delay slot to avoid the problem case.
+ */
+#define membar_safe(type) \
+do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
+			     " membar	" type "\n" \
+			     "1:\n" \
+			     : : : "memory"); \
+} while (0)
+
+#define mb()	\
+	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+#define rmb()	\
+	membar_safe("#LoadLoad")
+#define wmb()	\
+	membar_safe("#StoreStore")
+#define membar_storeload() \
+	membar_safe("#StoreLoad")
+#define membar_storeload_storestore() \
+	membar_safe("#StoreLoad | #StoreStore")
+#define membar_storeload_loadload() \
+	membar_safe("#StoreLoad | #LoadLoad")
+#define membar_storestore_loadstore() \
+	membar_safe("#StoreStore | #LoadStore")
 
 #endif
diff --git a/include/asm-sparc64/types.h b/include/asm-sparc64/types.h
index 6248ed1a9a7..d0ee7f10583 100644
--- a/include/asm-sparc64/types.h
+++ b/include/asm-sparc64/types.h
@@ -56,8 +56,6 @@ typedef unsigned long u64;
 typedef u32 dma_addr_t;
 typedef u64 dma64_addr_t;
 
-typedef unsigned short kmem_bufctl_t;
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 5690142f82d..80a65d7e3db 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -59,12 +59,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long size)
 	return 1;
 }
 
-/* this function will go away soon - use access_ok() instead */
-static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
-{
-	return 0;
-}
-
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
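With the barriers inlined, a call such as rmb() now emits the branch-always sequence the errata note describes (derived directly from the membar_safe() macro above): "ba,pt %xcc, 1f / membar #LoadLoad / 1:", so the membar always sits in a correctly-predicted delay slot. A minimal usage sketch, compilable only for a sparc64 target, with the two macros copied from the hunk; the flag-handshake code around them is an illustrative assumption, not part of the patch:

#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
#define membar_storeload()	membar_safe("#StoreLoad")

static volatile int flag_a, flag_b;

/* Dekker-style handshake: the store to flag_a must be globally visible
 * before flag_b is read, which is exactly the #StoreLoad ordering case. */
static int cpu_a_try_enter(void)
{
	flag_a = 1;
	membar_storeload();
	return flag_b == 0;
}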