Diffstat (limited to 'include/asm-sparc64')
-rw-r--r-- | include/asm-sparc64/atomic.h | 8
-rw-r--r-- | include/asm-sparc64/auxvec.h | 4
-rw-r--r-- | include/asm-sparc64/bitops.h | 4
-rw-r--r-- | include/asm-sparc64/compat.h | 18
-rw-r--r-- | include/asm-sparc64/cpudata.h | 4
-rw-r--r-- | include/asm-sparc64/fcntl.h | 46
-rw-r--r-- | include/asm-sparc64/futex.h | 53
-rw-r--r-- | include/asm-sparc64/hardirq.h | 16
-rw-r--r-- | include/asm-sparc64/hdreg.h | 1
-rw-r--r-- | include/asm-sparc64/io.h | 47
-rw-r--r-- | include/asm-sparc64/page.h | 16
-rw-r--r-- | include/asm-sparc64/pci.h | 2
-rw-r--r-- | include/asm-sparc64/pgtable.h | 3
-rw-r--r-- | include/asm-sparc64/processor.h | 1
-rw-r--r-- | include/asm-sparc64/segment.h | 6
-rw-r--r-- | include/asm-sparc64/sfafsr.h | 82
-rw-r--r-- | include/asm-sparc64/socket.h | 2
-rw-r--r-- | include/asm-sparc64/spinlock.h | 152
-rw-r--r-- | include/asm-sparc64/spinlock_types.h | 20
-rw-r--r-- | include/asm-sparc64/system.h | 52
-rw-r--r-- | include/asm-sparc64/types.h | 2
-rw-r--r-- | include/asm-sparc64/uaccess.h | 6
22 files changed, 292 insertions, 253 deletions
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index d80f3379669..e175afcf2cd 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -72,10 +72,10 @@ extern int atomic64_sub_ret(int, atomic64_t *); /* Atomic operations are already serializing */ #ifdef CONFIG_SMP -#define smp_mb__before_atomic_dec() membar("#StoreLoad | #LoadLoad") -#define smp_mb__after_atomic_dec() membar("#StoreLoad | #StoreStore") -#define smp_mb__before_atomic_inc() membar("#StoreLoad | #LoadLoad") -#define smp_mb__after_atomic_inc() membar("#StoreLoad | #StoreStore") +#define smp_mb__before_atomic_dec() membar_storeload_loadload(); +#define smp_mb__after_atomic_dec() membar_storeload_storestore(); +#define smp_mb__before_atomic_inc() membar_storeload_loadload(); +#define smp_mb__after_atomic_inc() membar_storeload_storestore(); #else #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-sparc64/auxvec.h b/include/asm-sparc64/auxvec.h new file mode 100644 index 00000000000..436a2912982 --- /dev/null +++ b/include/asm-sparc64/auxvec.h @@ -0,0 +1,4 @@ +#ifndef __ASM_SPARC64_AUXVEC_H +#define __ASM_SPARC64_AUXVEC_H + +#endif /* !(__ASM_SPARC64_AUXVEC_H) */ diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 9c5e7197028..6388b8376c5 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h @@ -72,8 +72,8 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) } #ifdef CONFIG_SMP -#define smp_mb__before_clear_bit() membar("#StoreLoad | #LoadLoad") -#define smp_mb__after_clear_bit() membar("#StoreLoad | #StoreStore") +#define smp_mb__before_clear_bit() membar_storeload_loadload() +#define smp_mb__after_clear_bit() membar_storeload_storestore() #else #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() diff --git a/include/asm-sparc64/compat.h b/include/asm-sparc64/compat.h index b59122dd176..c73935dc7ba 100644 --- a/include/asm-sparc64/compat.h +++ b/include/asm-sparc64/compat.h @@ -12,8 +12,10 @@ typedef s32 compat_ssize_t; typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; -typedef u16 compat_uid_t; -typedef u16 compat_gid_t; +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; +typedef u32 __compat_uid32_t; +typedef u32 __compat_gid32_t; typedef u16 compat_mode_t; typedef u32 compat_ino_t; typedef u16 compat_dev_t; @@ -47,8 +49,8 @@ struct compat_stat { compat_ino_t st_ino; compat_mode_t st_mode; compat_nlink_t st_nlink; - compat_uid_t st_uid; - compat_gid_t st_gid; + __compat_uid_t st_uid; + __compat_gid_t st_gid; compat_dev_t st_rdev; compat_off_t st_size; compat_time_t st_atime; @@ -177,10 +179,10 @@ static __inline__ void __user *compat_alloc_user_space(long len) struct compat_ipc64_perm { compat_key_t key; - __kernel_uid_t uid; - __kernel_gid_t gid; - __kernel_uid_t cuid; - __kernel_gid_t cgid; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; unsigned short __pad1; compat_mode_t mode; unsigned short __pad2; diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index cc7198aaac5..9a3a81f1cc5 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -1,6 +1,6 @@ /* cpudata.h: Per-cpu parameters. * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) + * Copyright (C) 2003, 2005 David S. 
Miller (davem@redhat.com) */ #ifndef _SPARC64_CPUDATA_H @@ -10,7 +10,7 @@ typedef struct { /* Dcache line 1 */ - unsigned int __pad0; /* bh_count moved to irq_stat for consistency. KAO */ + unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ unsigned int multiplier; unsigned int counter; unsigned int idle_volume; diff --git a/include/asm-sparc64/fcntl.h b/include/asm-sparc64/fcntl.h index e36def0d0d8..b2aecf0054b 100644 --- a/include/asm-sparc64/fcntl.h +++ b/include/asm-sparc64/fcntl.h @@ -4,10 +4,6 @@ /* open/fcntl - O_SYNC is only implemented on blocks devices and on files located on an ext2 file system */ -#define O_RDONLY 0x0000 -#define O_WRONLY 0x0001 -#define O_RDWR 0x0002 -#define O_ACCMODE 0x0003 #define O_NDELAY 0x0004 #define O_APPEND 0x0008 #define FASYNC 0x0040 /* fcntl, for BSD compatibility */ @@ -17,62 +13,24 @@ #define O_SYNC 0x2000 #define O_NONBLOCK 0x4000 #define O_NOCTTY 0x8000 /* not fcntl */ -#define O_DIRECTORY 0x10000 /* must be a directory */ -#define O_NOFOLLOW 0x20000 /* don't follow links */ #define O_LARGEFILE 0x40000 #define O_DIRECT 0x100000 /* direct disk access hint */ #define O_NOATIME 0x200000 -#define F_DUPFD 0 /* dup */ -#define F_GETFD 1 /* get close_on_exec */ -#define F_SETFD 2 /* set/clear close_on_exec */ -#define F_GETFL 3 /* get file->f_flags */ -#define F_SETFL 4 /* set file->f_flags */ #define F_GETOWN 5 /* for sockets. */ #define F_SETOWN 6 /* for sockets. */ #define F_GETLK 7 #define F_SETLK 8 #define F_SETLKW 9 -#define F_SETSIG 10 /* for sockets. */ -#define F_GETSIG 11 /* for sockets. */ - -/* for F_[GET|SET]FL */ -#define FD_CLOEXEC 1 /* actually anything with low bit set goes */ /* for posix fcntl() and lockf() */ #define F_RDLCK 1 #define F_WRLCK 2 #define F_UNLCK 3 -/* for old implementation of bsd flock () */ -#define F_EXLCK 4 /* or 3 */ -#define F_SHLCK 8 /* or 4 */ - -/* for leases */ -#define F_INPROGRESS 16 - -/* operations for bsd flock(), also used by the kernel implementation */ -#define LOCK_SH 1 /* shared lock */ -#define LOCK_EX 2 /* exclusive lock */ -#define LOCK_NB 4 /* or'd with one of the above to prevent - blocking */ -#define LOCK_UN 8 /* remove lock */ - -#define LOCK_MAND 32 /* This is a mandatory flock */ -#define LOCK_READ 64 /* ... Which allows concurrent read operations */ -#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */ -#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */ - -struct flock { - short l_type; - short l_whence; - off_t l_start; - off_t l_len; - pid_t l_pid; - short __unused; -}; +#define __ARCH_FLOCK_PAD short __unused; -#define F_LINUX_SPECIFIC_BASE 1024 +#include <asm-generic/fcntl.h> #endif /* !(_SPARC64_FCNTL_H) */ diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h new file mode 100644 index 00000000000..2cac5ecd9d0 --- /dev/null +++ b/include/asm-sparc64/futex.h @@ -0,0 +1,53 @@ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <asm/errno.h> +#include <asm/uaccess.h> + +static inline int +futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret, tem; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + inc_preempt_count(); + + switch (op) { + case FUTEX_OP_SET: + case FUTEX_OP_ADD: + case FUTEX_OP_OR: + case FUTEX_OP_ANDN: + case FUTEX_OP_XOR: + default: + ret = -ENOSYS; + } + + dec_preempt_count(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +#endif +#endif diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h index d6db1aed764..f0cf71376ec 100644 --- a/include/asm-sparc64/hardirq.h +++ b/include/asm-sparc64/hardirq.h @@ -1,22 +1,16 @@ /* hardirq.h: 64-bit Sparc hard IRQ support. * - * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1997, 1998, 2005 David S. Miller (davem@davemloft.net) */ #ifndef __SPARC64_HARDIRQ_H #define __SPARC64_HARDIRQ_H -#include <linux/config.h> -#include <linux/threads.h> -#include <linux/spinlock.h> -#include <linux/cache.h> +#include <asm/cpudata.h> -/* rtrap.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ +#define __ARCH_IRQ_STAT +#define local_softirq_pending() \ + (local_cpu_data().__softirq_pending) #define HARDIRQ_BITS 8 diff --git a/include/asm-sparc64/hdreg.h b/include/asm-sparc64/hdreg.h deleted file mode 100644 index 7f7fd1af0af..00000000000 --- a/include/asm-sparc64/hdreg.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/hdreg.h> diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h index afdcea90707..0056770e83a 100644 --- a/include/asm-sparc64/io.h +++ b/include/asm-sparc64/io.h @@ -100,18 +100,41 @@ static __inline__ void _outl(u32 l, unsigned long addr) #define inl_p(__addr) inl(__addr) #define outl_p(__l, __addr) outl(__l, __addr) -extern void outsb(void __iomem *addr, const void *src, unsigned long count); -extern void outsw(void __iomem *addr, const void *src, unsigned long count); -extern void outsl(void __iomem *addr, const void *src, unsigned long count); -extern void insb(void __iomem *addr, void *dst, unsigned long count); -extern void insw(void __iomem *addr, void *dst, unsigned long count); -extern void insl(void __iomem *addr, void *dst, unsigned long count); -#define ioread8_rep(a,d,c) insb(a,d,c) -#define ioread16_rep(a,d,c) insw(a,d,c) -#define ioread32_rep(a,d,c) insl(a,d,c) -#define iowrite8_rep(a,s,c) outsb(a,s,c) -#define iowrite16_rep(a,s,c) outsw(a,s,c) -#define iowrite32_rep(a,s,c) outsl(a,s,c) +extern void outsb(unsigned long, const void *, unsigned long); +extern void outsw(unsigned long, const void *, unsigned long); +extern void outsl(unsigned long, const void *, unsigned long); +extern void insb(unsigned long, void *, unsigned long); +extern void insw(unsigned long, void *, unsigned long); +extern void insl(unsigned long, void *, unsigned long); + +static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count) +{ + insb((unsigned long __force)port, buf, count); +} +static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count) +{ + insw((unsigned long __force)port, buf, count); +} + +static 
inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count) +{ + insl((unsigned long __force)port, buf, count); +} + +static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count) +{ + outsb((unsigned long __force)port, buf, count); +} + +static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count) +{ + outsw((unsigned long __force)port, buf, count); +} + +static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count) +{ + outsl((unsigned long __force)port, buf, count); +} /* Memory functions, same as I/O accesses on Ultra. */ static inline u8 _readb(const volatile void __iomem *addr) diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index b87dbbd64bc..c9f8ef208ea 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -150,20 +150,6 @@ struct sparc_phys_banks { extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; -/* Pure 2^n version of get_order */ -static __inline__ int get_order(unsigned long size) -{ - int order; - - size = (size-1) >> (PAGE_SHIFT-1); - order = -1; - do { - size >>= 1; - order++; - } while (size); - return order; -} - #endif /* !(__ASSEMBLY__) */ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ @@ -171,4 +157,6 @@ static __inline__ int get_order(unsigned long size) #endif /* !(__KERNEL__) */ +#include <asm-generic/page.h> + #endif /* !(_SPARC64_PAGE_H) */ diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h index a4ab0ec7143..89bd71b1c0d 100644 --- a/include/asm-sparc64/pci.h +++ b/include/asm-sparc64/pci.h @@ -269,6 +269,8 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); +extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *); + static inline void pcibios_add_platform_entries(struct pci_dev *dev) { } diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 1ae00c5087f..a2b4f5ed462 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -410,9 +410,6 @@ extern unsigned long *sparc64_valid_addr_bitmap; #define kern_addr_valid(addr) \ (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap)) -extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, - unsigned long offset, - unsigned long size, pgprot_t prot, int space); extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index d0bee241356..3169f3e2237 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -18,7 +18,6 @@ #include <asm/a.out.h> #include <asm/pstate.h> #include <asm/ptrace.h> -#include <asm/segment.h> #include <asm/page.h> /* The sparc has no problems with write protection */ diff --git a/include/asm-sparc64/segment.h b/include/asm-sparc64/segment.h deleted file mode 100644 index b03e709fc94..00000000000 --- a/include/asm-sparc64/segment.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC64_SEGMENT_H -#define __SPARC64_SEGMENT_H - -/* Only here because we have some old header files that expect it.. 
*/ - -#endif diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h new file mode 100644 index 00000000000..2f792c20b53 --- /dev/null +++ b/include/asm-sparc64/sfafsr.h @@ -0,0 +1,82 @@ +#ifndef _SPARC64_SFAFSR_H +#define _SPARC64_SFAFSR_H + +#include <asm/const.h> + +/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */ + +#define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT) +#define SFAFSR_ME_SHIFT 32 +#define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT) +#define SFAFSR_PRIV_SHIFT 31 +#define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT) +#define SFAFSR_ISAP_SHIFT 30 +#define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT) +#define SFAFSR_ETP_SHIFT 29 +#define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT) +#define SFAFSR_IVUE_SHIFT 28 +#define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT) +#define SFAFSR_TO_SHIFT 27 +#define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT) +#define SFAFSR_BERR_SHIFT 26 +#define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT) +#define SFAFSR_LDP_SHIFT 25 +#define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT) +#define SFAFSR_CP_SHIFT 24 +#define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT) +#define SFAFSR_WP_SHIFT 23 +#define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT) +#define SFAFSR_EDP_SHIFT 22 +#define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT) +#define SFAFSR_UE_SHIFT 21 +#define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT) +#define SFAFSR_CE_SHIFT 20 +#define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT) +#define SFAFSR_ETS_SHIFT 16 +#define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT) +#define SFAFSR_PSYND_SHIFT 0 + +/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read + * ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write + */ + +#define UDBE_UE (_AC(1,UL) << 9) +#define UDBE_CE (_AC(1,UL) << 8) +#define UDBE_E_SYNDR (_AC(0xff,UL) << 0) + +/* The trap handlers for asynchronous errors encode the AFSR and + * other pieces of information into a 64-bit argument for C code + * encoded as follows: + * + * ----------------------------------------------- + * | UDB_H | UDB_L | TL>1 | TT | AFSR | + * ----------------------------------------------- + * 63 54 53 44 42 41 33 32 0 + * + * The AFAR is passed in unchanged. + */ +#define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT) +#define SFSTAT_UDBH_SHIFT 54 +#define SFSTAT_UDBL_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT) +#define SFSTAT_UDBL_SHIFT 44 +#define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT) +#define SFSTAT_TL_GT_ONE_SHIFT 42 +#define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT) +#define SFSTAT_TRAP_TYPE_SHIFT 33 +#define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT) +#define SFSTAT_AFSR_SHIFT 0 + +/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */ +#define ESTATE_ERR_CE 0x1 /* Correctable errors */ +#define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */ +#define ESTATE_ERR_ISAP 0x4 /* System address parity error */ +#define ESTATE_ERR_ALL (ESTATE_ERR_CE | \ + ESTATE_ERR_NCE | \ + ESTATE_ERR_ISAP) + +/* The various trap types that report using the above state. 
*/ +#define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */ +#define TRAP_TYPE_DAE 0x32 /* Data Access Error */ +#define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */ + +#endif /* _SPARC64_SFAFSR_H */ diff --git a/include/asm-sparc64/socket.h b/include/asm-sparc64/socket.h index 865547a2390..59987dad335 100644 --- a/include/asm-sparc64/socket.h +++ b/include/asm-sparc64/socket.h @@ -29,6 +29,8 @@ #define SO_SNDBUF 0x1001 #define SO_RCVBUF 0x1002 +#define SO_SNDBUFFORCE 0x100a +#define SO_RCVBUFFORCE 0x100b #define SO_ERROR 0x1007 #define SO_TYPE 0x1008 diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index 9cb93a5c2b4..ec85d12d73b 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h @@ -29,24 +29,13 @@ * must be pre-V9 branches. */ -#ifndef CONFIG_DEBUG_SPINLOCK +#define __raw_spin_is_locked(lp) ((lp)->lock != 0) -typedef struct { - volatile unsigned char lock; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} spinlock_t; -#define SPIN_LOCK_UNLOCKED (spinlock_t) {0,} +#define __raw_spin_unlock_wait(lp) \ + do { rmb(); \ + } while((lp)->lock) -#define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) -#define spin_is_locked(lp) ((lp)->lock != 0) - -#define spin_unlock_wait(lp) \ -do { membar("#LoadLoad"); \ -} while((lp)->lock) - -static inline void _raw_spin_lock(spinlock_t *lock) +static inline void __raw_spin_lock(raw_spinlock_t *lock) { unsigned long tmp; @@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) : "memory"); } -static inline int _raw_spin_trylock(spinlock_t *lock) +static inline int __raw_spin_trylock(raw_spinlock_t *lock) { unsigned long result; @@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) return (result == 0UL); } -static inline void _raw_spin_unlock(spinlock_t *lock) +static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__( " membar #StoreStore | #LoadStore\n" @@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock) : "memory"); } -static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) +static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { unsigned long tmp1, tmp2; @@ -115,48 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) : "memory"); } -#else /* !(CONFIG_DEBUG_SPINLOCK) */ - -typedef struct { - volatile unsigned char lock; - unsigned int owner_pc, owner_cpu; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} spinlock_t; -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff } -#define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) -#define spin_is_locked(__lock) ((__lock)->lock != 0) -#define spin_unlock_wait(__lock) \ -do { \ - membar("#LoadLoad"); \ -} while((__lock)->lock) - -extern void _do_spin_lock (spinlock_t *lock, char *str); -extern void _do_spin_unlock (spinlock_t *lock); -extern int _do_spin_trylock (spinlock_t *lock); - -#define _raw_spin_trylock(lp) _do_spin_trylock(lp) -#define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") -#define _raw_spin_unlock(lock) _do_spin_unlock(lock) -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) - -#endif /* CONFIG_DEBUG_SPINLOCK */ - /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... 
*/ -#ifndef CONFIG_DEBUG_SPINLOCK - -typedef struct { - volatile unsigned int lock; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} rwlock_t; -#define RW_LOCK_UNLOCKED (rwlock_t) {0,} -#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) - -static void inline __read_lock(rwlock_t *lock) +static void inline __read_lock(raw_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -181,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock) : "memory"); } -static void inline __read_unlock(rwlock_t *lock) +static void inline __read_unlock(raw_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -198,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock) : "memory"); } -static void inline __write_lock(rwlock_t *lock) +static void inline __write_lock(raw_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -225,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock) : "memory"); } -static void inline __write_unlock(rwlock_t *lock) +static void inline __write_unlock(raw_rwlock_t *lock) { __asm__ __volatile__( " membar #LoadStore | #StoreStore\n" @@ -235,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock) : "memory"); } -static int inline __write_trylock(rwlock_t *lock) +static int inline __write_trylock(raw_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; @@ -260,73 +210,15 @@ static int inline __write_trylock(rwlock_t *lock) return result; } -#define _raw_read_lock(p) __read_lock(p) -#define _raw_read_unlock(p) __read_unlock(p) -#define _raw_write_lock(p) __write_lock(p) -#define _raw_write_unlock(p) __write_unlock(p) -#define _raw_write_trylock(p) __write_trylock(p) - -#else /* !(CONFIG_DEBUG_SPINLOCK) */ - -typedef struct { - volatile unsigned long lock; - unsigned int writer_pc, writer_cpu; - unsigned int reader_pc[NR_CPUS]; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} rwlock_t; -#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } -#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) - -extern void _do_read_lock(rwlock_t *rw, char *str); -extern void _do_read_unlock(rwlock_t *rw, char *str); -extern void _do_write_lock(rwlock_t *rw, char *str); -extern void _do_write_unlock(rwlock_t *rw); -extern int _do_write_trylock(rwlock_t *rw, char *str); - -#define _raw_read_lock(lock) \ -do { unsigned long flags; \ - local_irq_save(flags); \ - _do_read_lock(lock, "read_lock"); \ - local_irq_restore(flags); \ -} while(0) - -#define _raw_read_unlock(lock) \ -do { unsigned long flags; \ - local_irq_save(flags); \ - _do_read_unlock(lock, "read_unlock"); \ - local_irq_restore(flags); \ -} while(0) - -#define _raw_write_lock(lock) \ -do { unsigned long flags; \ - local_irq_save(flags); \ - _do_write_lock(lock, "write_lock"); \ - local_irq_restore(flags); \ -} while(0) - -#define _raw_write_unlock(lock) \ -do { unsigned long flags; \ - local_irq_save(flags); \ - _do_write_unlock(lock); \ - local_irq_restore(flags); \ -} while(0) - -#define _raw_write_trylock(lock) \ -({ unsigned long flags; \ - int val; \ - local_irq_save(flags); \ - val = _do_write_trylock(lock, "write_trylock"); \ - local_irq_restore(flags); \ - val; \ -}) - -#endif /* CONFIG_DEBUG_SPINLOCK */ - -#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) -#define read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) -#define write_can_lock(rw) (!(rw)->lock) +#define __raw_read_lock(p) __read_lock(p) +#define __raw_read_unlock(p) __read_unlock(p) +#define __raw_write_lock(p) __write_lock(p) +#define __raw_write_unlock(p) __write_unlock(p) +#define 
__raw_write_trylock(p) __write_trylock(p) + +#define __raw_read_trylock(lock) generic__raw_read_trylock(lock) +#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) +#define __raw_write_can_lock(rw) (!(rw)->lock) #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h new file mode 100644 index 00000000000..e128112a0d7 --- /dev/null +++ b/include/asm-sparc64/spinlock_types.h @@ -0,0 +1,20 @@ +#ifndef __SPARC64_SPINLOCK_TYPES_H +#define __SPARC64_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + volatile unsigned char lock; +} raw_spinlock_t; + +#define __RAW_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int lock; +} raw_rwlock_t; + +#define __RAW_RW_LOCK_UNLOCKED { 0 } + +#endif diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index ee4bdfc6b88..b5417529f6f 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -28,6 +28,49 @@ enum sparc_cpu { #define ARCH_SUN4C_SUN4 0 #define ARCH_SUN4 0 +/* These are here in an effort to more fully work around Spitfire Errata + * #51. Essentially, if a memory barrier occurs soon after a mispredicted + * branch, the chip can stop executing instructions until a trap occurs. + * Therefore, if interrupts are disabled, the chip can hang forever. + * + * It used to be believed that the memory barrier had to be right in the + * delay slot, but a case has been traced recently wherein the memory barrier + * was one instruction after the branch delay slot and the chip still hung. + * The offending sequence was the following in sym_wakeup_done() of the + * sym53c8xx_2 driver: + * + * call sym_ccb_from_dsa, 0 + * movge %icc, 0, %l0 + * brz,pn %o0, .LL1303 + * mov %o0, %l2 + * membar #LoadLoad + * + * The branch has to be mispredicted for the bug to occur. Therefore, we put + * the memory barrier explicitly into a "branch always, predicted taken" + * delay slot to avoid the problem case. 
+ */ +#define membar_safe(type) \ +do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ + " membar " type "\n" \ + "1:\n" \ + : : : "memory"); \ +} while (0) + +#define mb() \ + membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad") +#define rmb() \ + membar_safe("#LoadLoad") +#define wmb() \ + membar_safe("#StoreStore") +#define membar_storeload() \ + membar_safe("#StoreLoad") +#define membar_storeload_storestore() \ + membar_safe("#StoreLoad | #StoreStore") +#define membar_storeload_loadload() \ + membar_safe("#StoreLoad | #LoadLoad") +#define membar_storestore_loadstore() \ + membar_safe("#StoreStore | #LoadStore") + #endif #define setipl(__new_ipl) \ @@ -78,16 +121,11 @@ enum sparc_cpu { #define nop() __asm__ __volatile__ ("nop") -#define membar(type) __asm__ __volatile__ ("membar " type : : : "memory") -#define mb() \ - membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad") -#define rmb() membar("#LoadLoad") -#define wmb() membar("#StoreStore") #define read_barrier_depends() do { } while(0) #define set_mb(__var, __value) \ - do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0) + do { __var = __value; membar_storeload_storestore(); } while(0) #define set_wmb(__var, __value) \ - do { __var = __value; membar("#StoreStore"); } while(0) + do { __var = __value; wmb(); } while(0) #ifdef CONFIG_SMP #define smp_mb() mb() diff --git a/include/asm-sparc64/types.h b/include/asm-sparc64/types.h index 6248ed1a9a7..d0ee7f10583 100644 --- a/include/asm-sparc64/types.h +++ b/include/asm-sparc64/types.h @@ -56,8 +56,6 @@ typedef unsigned long u64; typedef u32 dma_addr_t; typedef u64 dma64_addr_t; -typedef unsigned short kmem_bufctl_t; - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index 5690142f82d..80a65d7e3db 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h @@ -59,12 +59,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si return 1; } -/* this function will go away soon - use access_ok() instead */ -static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size) -{ - return 0; -} - /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is |
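For reference, the new memory-barrier scheme introduced by the system.h hunk above can be summarized in one place. The following is a minimal illustrative sketch assembled from the hunks in this diff; it is not an applyable part of the patch, and it only compiles with a sparc64-targeted gcc:

/* Sketch of the Spitfire Errata #51 workaround described in the system.h
 * comment above: the membar is placed in the delay slot of a "branch
 * always, predicted taken", so it can never immediately follow a
 * mispredicted branch.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The generic and named barriers are then built on membar_safe()
 * instead of the old open-coded membar("...") strings.
 */
#define mb()	\
	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb()	membar_safe("#LoadLoad")
#define wmb()	membar_safe("#StoreStore")
#define membar_storeload_storestore()	membar_safe("#StoreLoad | #StoreStore")
#define membar_storeload_loadload()	membar_safe("#StoreLoad | #LoadLoad")

/* The atomic.h and bitops.h hunks switch their CONFIG_SMP helpers to the
 * named wrappers, e.g.:
 */
#define smp_mb__before_clear_bit()	membar_storeload_loadload()
#define smp_mb__after_clear_bit()	membar_storeload_storestore()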