Diffstat (limited to 'arch/parisc/include/asm')
 arch/parisc/include/asm/atomic.h        | 108
 arch/parisc/include/asm/barrier.h       |  35
 arch/parisc/include/asm/cmpxchg.h       | 116
 arch/parisc/include/asm/delay.h         |   2
 arch/parisc/include/asm/dma.h           |   1
 arch/parisc/include/asm/exec.h          |   6
 arch/parisc/include/asm/futex.h         |  31
 arch/parisc/include/asm/ldcw.h          |  48
 arch/parisc/include/asm/posix_types.h   | 119
 arch/parisc/include/asm/processor.h     |   2
 arch/parisc/include/asm/psw.h           |  41
 arch/parisc/include/asm/special_insns.h |  40
 arch/parisc/include/asm/spinlock.h      |   1
 arch/parisc/include/asm/switch_to.h     |  12
 arch/parisc/include/asm/system.h        | 165
 arch/parisc/include/asm/thread_info.h   |   1
 arch/parisc/include/asm/timex.h         |   1
 arch/parisc/include/asm/uaccess.h       |   1
18 files changed, 338 insertions(+), 392 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 4054b31e0fa..6c6defc2461 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -6,7 +6,7 @@
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/types.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -49,112 +49,6 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 #  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
 #endif
 
-/* This should get optimized out since it's never called.
-** Or get a link error if xchg is used "wrong".
-*/
-extern void __xchg_called_with_bad_pointer(void);
-
-
-/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __xchg8(char, char *);
-extern unsigned long __xchg32(int, int *);
-#ifdef CONFIG_64BIT
-extern unsigned long __xchg64(unsigned long, unsigned long *);
-#endif
-
-/* optimizer better get rid of switch since size is a constant */
-static __inline__ unsigned long
-__xchg(unsigned long x, __volatile__ void * ptr, int size)
-{
-	switch(size) {
-#ifdef CONFIG_64BIT
-	case 8: return __xchg64(x,(unsigned long *) ptr);
-#endif
-	case 4: return __xchg32((int) x, (int *) ptr);
-	case 1: return __xchg8((char) x, (char *) ptr);
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-
-/*
-** REVISIT - Abandoned use of LDCW in xchg() for now:
-** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
-** o and while we are at it, could CONFIG_64BIT code use LDCD too?
-**
-**	if (__builtin_constant_p(x) && (x == NULL))
-**		if (((unsigned long)p & 0xf) == 0)
-**			return __ldcw(p);
-*/
-#define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-
-#define __HAVE_ARCH_CMPXCHG	1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
-	switch(size) {
-#ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr,o,n)						\
-	({								\
-		__typeof__(*(ptr)) _o_ = (o);				\
-		__typeof__(*(ptr)) _n_ = (n);				\
-		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-		    (unsigned long)_n_, sizeof(*(ptr)));		\
-	})
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-				      unsigned long old,
-				      unsigned long new_, int size)
-{
-	switch (size) {
-#ifdef CONFIG_64BIT
-	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-	case 4:	return __cmpxchg_u32(ptr, old, new_);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new_, size);
-	}
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-			(unsigned long)(n), sizeof(*(ptr))))
-#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)					\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
-  })
-#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
 /*
  * Note that we need not lock read accesses - aligned word writes/reads
  * are atomic, so a reader never sees inconsistent values.
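For context on what atomic.h still provides after this hunk: the hashed-spinlock machinery (_atomic_spin_lock_irqsave() and friends) stays behind, and atomic operations are built on it by hashing the operand's address to one of the ATOMIC_HASH_SIZE locks. A minimal sketch, not part of the patch; my_atomic_add is an invented name used only for illustration:

/* Sketch of the pattern the retained helpers support. */
static inline void my_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);	/* lock picked by hashing &v */
	v->counter += i;
	_atomic_spin_unlock_irqrestore(v, flags);
}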
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 00000000000..e77d834aa80
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,35 @@
+#ifndef __PARISC_BARRIER_H
+#define __PARISC_BARRIER_H
+
+/*
+** This is simply the barrier() macro from linux/kernel.h but when serial.c
+** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
+** hasn't yet been included yet so it fails, thus repeating the macro here.
+**
+** PA-RISC architecture allows for weakly ordered memory accesses although
+** none of the processors use it. There is a strong ordered bit that is
+** set in the O-bit of the page directory entry. Operating systems that
+** can not tolerate out of order accesses should set this bit when mapping
+** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
+** of the processor implemented the PSW O-bit). The PCX-W ERS states that
+** the TLB O-bit is not implemented so the page directory does not need to
+** have the O-bit set when mapping pages (section 3.1). This section also
+** states that the PSW Y, Z, G, and O bits are not implemented.
+** So it looks like nothing needs to be done for parisc-linux (yet).
+** (thanks to chada for the above comment -ggg)
+**
+** The __asm__ op below simple prevents gcc/ld from reordering
+** instructions across the mb() "call".
+*/
+#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
+#define rmb()		mb()
+#define wmb()		mb()
+#define smp_mb()	mb()
+#define smp_rmb()	mb()
+#define smp_wmb()	mb()
+#define smp_read_barrier_depends()	do { } while(0)
+#define read_barrier_depends()		do { } while(0)
+
+#define set_mb(var, value)		do { var = value; mb(); } while (0)
+
+#endif /* __PARISC_BARRIER_H */
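Since every barrier above collapses to a compiler-only fence (the hardware is strongly ordered in practice, per the comment), what mb() still buys is protection from GCC reordering or caching memory accesses. An illustrative sketch with invented variable names, not taken from the kernel:

static int data, ready;	/* hypothetical flag/payload pair */

void producer(void)
{
	data = 42;
	mb();		/* keep the store to 'data' ahead of 'ready' */
	ready = 1;
}

int consumer(void)
{
	while (!ready)
		mb();	/* the "memory" clobber forces 'ready' to be re-read */
	return data;
}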
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
new file mode 100644
index 00000000000..dbd13354ec4
--- /dev/null
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -0,0 +1,116 @@
+/*
+ * forked from parisc asm/atomic.h which was:
+ *	Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ *	Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#ifndef _ASM_PARISC_CMPXCHG_H_
+#define _ASM_PARISC_CMPXCHG_H_
+
+/* This should get optimized out since it's never called.
+** Or get a link error if xchg is used "wrong".
+*/
+extern void __xchg_called_with_bad_pointer(void);
+
+/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __xchg8(char, char *);
+extern unsigned long __xchg32(int, int *);
+#ifdef CONFIG_64BIT
+extern unsigned long __xchg64(unsigned long, unsigned long *);
+#endif
+
+/* optimizer better get rid of switch since size is a constant */
+static inline unsigned long
+__xchg(unsigned long x, __volatile__ void *ptr, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __xchg64(x, (unsigned long *) ptr);
+#endif
+	case 4: return __xchg32((int) x, (int *) ptr);
+	case 1: return __xchg8((char) x, (char *) ptr);
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+/*
+** REVISIT - Abandoned use of LDCW in xchg() for now:
+** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
+** o and while we are at it, could CONFIG_64BIT code use LDCD too?
+**
+**	if (__builtin_constant_p(x) && (x == NULL))
+**		if (((unsigned long)p & 0xf) == 0)
+**			return __ldcw(p);
+*/
+#define xchg(ptr, x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+#define __HAVE_ARCH_CMPXCHG	1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
+				   unsigned int new_);
+extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
+				   unsigned long old, unsigned long new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+	case 4: return __cmpxchg_u32((unsigned int *)ptr,
+				     (unsigned int)old, (unsigned int)new_);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						 \
+({									 \
+	__typeof__(*(ptr)) _o_ = (o);					 \
+	__typeof__(*(ptr)) _n_ = (n);					 \
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
+				       (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new_, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+	case 4: return __cmpxchg_u32(ptr, old, new_);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new_, size);
+	}
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+			(unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_local((ptr), (o), (n));					\
+})
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#endif /* _ASM_PARISC_CMPXCHG_H_ */
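The new header is used like its counterparts on other architectures. As a usage illustration (the counter variable is invented, not from the patch), the classic compare-and-swap retry loop built on cmpxchg():

static unsigned int counter;	/* hypothetical shared counter */

static void counter_inc(void)
{
	unsigned int old;

	do {
		old = counter;
		/* retry if another CPU changed 'counter' in between */
	} while (cmpxchg(&counter, old, old + 1) != old);
}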
diff --git a/arch/parisc/include/asm/delay.h b/arch/parisc/include/asm/delay.h
index 7a75e984674..912ee7e6a57 100644
--- a/arch/parisc/include/asm/delay.h
+++ b/arch/parisc/include/asm/delay.h
@@ -1,7 +1,7 @@
 #ifndef _PARISC_DELAY_H
 #define _PARISC_DELAY_H
 
-#include <asm/system.h>    /* for mfctl() */
+#include <asm/special_insns.h>    /* for mfctl() */
 #include <asm/processor.h> /* for boot_cpu_data */
 
diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h
index f7a18f96870..fd48ae2de95 100644
--- a/arch/parisc/include/asm/dma.h
+++ b/arch/parisc/include/asm/dma.h
@@ -9,7 +9,6 @@
 #define _ASM_DMA_H
 
 #include <asm/io.h>		/* need byte IO */
-#include <asm/system.h>
 
 #define dma_outb	outb
 #define dma_inb		inb
diff --git a/arch/parisc/include/asm/exec.h b/arch/parisc/include/asm/exec.h
new file mode 100644
index 00000000000..6bb5af75b17
--- /dev/null
+++ b/arch/parisc/include/asm/exec.h
@@ -0,0 +1,6 @@
+#ifndef __PARISC_EXEC_H
+#define __PARISC_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* __PARISC_EXEC_H */
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 2388bdb3283..49df14805a9 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -8,6 +8,29 @@
 #include <asm/atomic.h>
 #include <asm/errno.h>
 
+/* The following has to match the LWS code in syscall.S.  We have
+   sixteen four-word locks. */
+
+static inline void
+_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	local_irq_save(*flags);
+	arch_spin_lock(s);
+}
+
+static inline void
+_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	arch_spin_unlock(s);
+	local_irq_restore(*flags);
+}
+
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
@@ -26,7 +49,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 
 	pagefault_disable();
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -71,7 +94,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	pagefault_enable();
 
@@ -113,7 +136,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	 * address. This should scale to a couple of CPUs.
 	 */
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	ret = get_user(val, uaddr);
 
@@ -122,7 +145,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
 	*uval = val;
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	return ret;
 }
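The lock choice in _futex_spin_lock_irqsave() rewards a worked example (the addresses below are invented): bits 4-7 of the futex address select one of the sixteen locks, and the >> 2 scales the byte offset to an int index into lws_lock_start[], where each lock occupies four words:

/* uaddr = 0x4000a730: uaddr & 0xf0 = 0x30, 0x30 >> 2 = 12
 *	-> lock at &lws_lock_start[12]
 * uaddr = 0x7fff1f34: uaddr & 0xf0 = 0x30, same index 12
 *	-> both futexes serialize on the same four-word lock
 * Successive values of (uaddr & 0xf0) step the index by 4 ints,
 * i.e. 16 bytes, matching the four-word lock size in syscall.S.
 */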
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
new file mode 100644
index 00000000000..d2d11b7055b
--- /dev/null
+++ b/arch/parisc/include/asm/ldcw.h
@@ -0,0 +1,48 @@
+#ifndef __PARISC_LDCW_H
+#define __PARISC_LDCW_H
+
+#ifndef CONFIG_PA20
+/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+   and GCC only guarantees 8-byte alignment for stack locals, we can't
+   be assured of 16-byte alignment for atomic lock data even if we
+   specify "__attribute ((aligned(16)))" in the type declaration.  So,
+   we use a struct containing an array of four ints for the atomic lock
+   type and dynamically select the 16-byte aligned int from the array
+   for the semaphore.  */
+
+#define __PA_LDCW_ALIGNMENT	16
+#define __ldcw_align(a) ({					\
+	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
+	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
+		& ~(__PA_LDCW_ALIGNMENT - 1);			\
+	(volatile unsigned int *) __ret;			\
+})
+#define __LDCW	"ldcw"
+
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
+
+#define __PA_LDCW_ALIGNMENT	4
+#define __ldcw_align(a) (&(a)->slock)
+#define __LDCW	"ldcw,co"
+
+#endif /*!CONFIG_PA20*/
+
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({						\
+	unsigned __ret;						\
+	__asm__ __volatile__(__LDCW " 0(%2),%0"			\
+		: "=r" (__ret), "+m" (*(a)) : "r" (a));		\
+	__ret;							\
+})
+
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
+#endif
+
+#endif /* __PARISC_LDCW_H */
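To see __ldcw() and __ldcw_align() together, here is a paraphrase (not a verbatim copy; my_spin_lock is an invented name) of the spin-lock acquire that asm/spinlock.h builds on them. LDCW loads the word and zeroes it atomically, so a zero return means someone else already holds the lock:

static inline void my_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);	/* 16-byte slot on PA1.x */

	while (__ldcw(a) == 0)		/* read 0: lock was already taken */
		while (*a == 0)
			;		/* spin on plain reads until freed */
}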
diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h
index 00da29a340b..5212b0357da 100644
--- a/arch/parisc/include/asm/posix_types.h
+++ b/arch/parisc/include/asm/posix_types.h
@@ -6,123 +6,22 @@
  * be a little careful about namespace pollution etc.  Also, we cannot
  * assume GCC is being used.
  */
-typedef unsigned long		__kernel_ino_t;
 typedef unsigned short		__kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+
 typedef unsigned short		__kernel_nlink_t;
-typedef long			__kernel_off_t;
-typedef int			__kernel_pid_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
 typedef unsigned short		__kernel_ipc_pid_t;
-typedef unsigned int		__kernel_uid_t;
-typedef unsigned int		__kernel_gid_t;
-typedef int			__kernel_suseconds_t;
-typedef long			__kernel_clock_t;
-typedef int			__kernel_timer_t;
-typedef int			__kernel_clockid_t;
-typedef int			__kernel_daddr_t;
-/* Note these change from narrow to wide kernels */
-#ifdef CONFIG_64BIT
-typedef unsigned long		__kernel_size_t;
-typedef long			__kernel_ssize_t;
-typedef long			__kernel_ptrdiff_t;
-#else
-typedef unsigned int		__kernel_size_t;
-typedef int			__kernel_ssize_t;
-typedef int			__kernel_ptrdiff_t;
-#endif
-typedef long			__kernel_time_t;
-typedef char *			__kernel_caddr_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
-typedef unsigned short		__kernel_uid16_t;
-typedef unsigned short		__kernel_gid16_t;
-typedef unsigned int		__kernel_uid32_t;
-typedef unsigned int		__kernel_gid32_t;
+typedef int			__kernel_suseconds_t;
+#define __kernel_suseconds_t __kernel_suseconds_t
 
-#ifdef __GNUC__
-typedef long long		__kernel_loff_t;
 typedef long long		__kernel_off64_t;
 typedef unsigned long long	__kernel_ino64_t;
-#endif
-
-typedef unsigned int		__kernel_old_dev_t;
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
-/* compatibility stuff */
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-
-#if defined(__KERNEL__)
-
-#undef	__FD_SET
-static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
-	unsigned long __tmp = __fd / __NFDBITS;
-	unsigned long __rem = __fd % __NFDBITS;
-	__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef	__FD_CLR
-static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
-	unsigned long __tmp = __fd / __NFDBITS;
-	unsigned long __rem = __fd % __NFDBITS;
-	__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-#undef	__FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
-	unsigned long __tmp = __fd / __NFDBITS;
-	unsigned long __rem = __fd % __NFDBITS;
-	return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef	__FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
-{
-	unsigned long *__tmp = __p->fds_bits;
-	int __i;
-
-	if (__builtin_constant_p(__FDSET_LONGS)) {
-		switch (__FDSET_LONGS) {
-		case 16:
-			__tmp[ 0] = 0; __tmp[ 1] = 0;
-			__tmp[ 2] = 0; __tmp[ 3] = 0;
-			__tmp[ 4] = 0; __tmp[ 5] = 0;
-			__tmp[ 6] = 0; __tmp[ 7] = 0;
-			__tmp[ 8] = 0; __tmp[ 9] = 0;
-			__tmp[10] = 0; __tmp[11] = 0;
-			__tmp[12] = 0; __tmp[13] = 0;
-			__tmp[14] = 0; __tmp[15] = 0;
-			return;
-
-		case 8:
-			__tmp[ 0] = 0; __tmp[ 1] = 0;
-			__tmp[ 2] = 0; __tmp[ 3] = 0;
-			__tmp[ 4] = 0; __tmp[ 5] = 0;
-			__tmp[ 6] = 0; __tmp[ 7] = 0;
-			return;
-
-		case 4:
-			__tmp[ 0] = 0; __tmp[ 1] = 0;
-			__tmp[ 2] = 0; __tmp[ 3] = 0;
-			return;
-		}
-	}
-	__i = __FDSET_LONGS;
-	while (__i) {
-		__i--;
-		*__tmp = 0;
-		__tmp++;
-	}
-}
-#endif /* defined(__KERNEL__) */
+#include <asm-generic/posix_types.h>
 
 #endif
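The typedef-plus-#define pairs above are how an architecture opts out of individual defaults: asm-generic/posix_types.h wraps each of its fallbacks in an #ifndef on the same name. A sketch of that convention (paraphrased from the asm-generic header, not copied):

#ifndef __kernel_mode_t
typedef unsigned int	__kernel_mode_t;	/* generic fallback */
#endif
/* parisc pre-defines both the type and the same-named macro,
 * so the fallback is skipped and its unsigned short wins. */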
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 7213ec9e594..acdf4cad612 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -16,7 +16,6 @@
 #include <asm/pdc.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
-#include <asm/system.h>
 #include <asm/percpu.h>
 
 #endif /* __ASSEMBLY__ */
@@ -169,6 +168,7 @@ struct thread_struct {
  * Return saved PC of a blocked thread.  This is used by ps mostly.
  */
 
+struct task_struct;
 unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);
 
diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h
index 5a3e23c9ce6..ad69a35e9c0 100644
--- a/arch/parisc/include/asm/psw.h
+++ b/arch/parisc/include/asm/psw.h
@@ -59,4 +59,45 @@
 #define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
 #define USER_PSW      (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
 
+#ifndef __ASSEMBLY__
+
+/* The program status word as bitfields.  */
+struct pa_psw {
+	unsigned int y:1;
+	unsigned int z:1;
+	unsigned int rv:2;
+	unsigned int w:1;
+	unsigned int e:1;
+	unsigned int s:1;
+	unsigned int t:1;
+
+	unsigned int h:1;
+	unsigned int l:1;
+	unsigned int n:1;
+	unsigned int x:1;
+	unsigned int b:1;
+	unsigned int c:1;
+	unsigned int v:1;
+	unsigned int m:1;
+
+	unsigned int cb:8;
+
+	unsigned int o:1;
+	unsigned int g:1;
+	unsigned int f:1;
+	unsigned int r:1;
+	unsigned int q:1;
+	unsigned int p:1;
+	unsigned int d:1;
+	unsigned int i:1;
+};
+
+#ifdef CONFIG_64BIT
+#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
+#else
+#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
 #endif
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
new file mode 100644
index 00000000000..d306b75bc77
--- /dev/null
+++ b/arch/parisc/include/asm/special_insns.h
@@ -0,0 +1,40 @@
+#ifndef __PARISC_SPECIAL_INSNS_H
+#define __PARISC_SPECIAL_INSNS_H
+
+#define mfctl(reg)	({		\
+	unsigned long cr;		\
+	__asm__ __volatile__(		\
+		"mfctl " #reg ",%0" :	\
+		 "=r" (cr)		\
+	);				\
+	cr;				\
+})
+
+#define mtctl(gr, cr) \
+	__asm__ __volatile__("mtctl %0,%1" \
+		: /* no outputs */ \
+		: "r" (gr), "i" (cr) : "memory")
+
+/* these are here to de-mystefy the calling code, and to provide hooks */
+/* which I needed for debugging EIEM problems -PB */
+#define get_eiem() mfctl(15)
+static inline void set_eiem(unsigned long val)
+{
+	mtctl(val, 15);
+}
+
+#define mfsp(reg)	({		\
+	unsigned long cr;		\
+	__asm__ __volatile__(		\
+		"mfsp " #reg ",%0" :	\
+		 "=r" (cr)		\
+	);				\
+	cr;				\
+})
+
+#define mtsp(gr, cr) \
+	__asm__ __volatile__("mtsp %0,%1" \
+		: /* no outputs */ \
+		: "r" (gr), "i" (cr) : "memory")
+
+#endif /* __PARISC_SPECIAL_INSNS_H */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 74036f436a3..804aa28ab1d 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
diff --git a/arch/parisc/include/asm/switch_to.h b/arch/parisc/include/asm/switch_to.h
new file mode 100644
index 00000000000..8ed8fea1e78
--- /dev/null
+++ b/arch/parisc/include/asm/switch_to.h
@@ -0,0 +1,12 @@
+#ifndef __PARISC_SWITCH_TO_H
+#define __PARISC_SWITCH_TO_H
+
+struct task_struct;
+
+extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);
+
+#define switch_to(prev, next, last) do {			\
+	(last) = _switch_to(prev, next);			\
+} while(0)
+
+#endif /* __PARISC_SWITCH_TO_H */
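mfctl() is what asm/delay.h now includes special_insns.h for. As an illustration in the spirit of that use (my_delay_cycles is an invented name), busy-waiting on the PA-RISC interval timer, control register %cr16:

static inline void my_delay_cycles(unsigned long cycles)
{
	unsigned long start = mfctl(16);	/* %cr16: interval timer */

	while ((mfctl(16) - start) < cycles)
		;	/* unsigned wraparound keeps the subtraction safe */
}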
deleted file mode 100644
index b19e63a8e84..00000000000
--- a/arch/parisc/include/asm/system.h
+++ /dev/null
@@ -1,165 +0,0 @@
-#ifndef __PARISC_SYSTEM_H
-#define __PARISC_SYSTEM_H
-
-#include <linux/irqflags.h>
-
-/* The program status word as bitfields.  */
-struct pa_psw {
-	unsigned int y:1;
-	unsigned int z:1;
-	unsigned int rv:2;
-	unsigned int w:1;
-	unsigned int e:1;
-	unsigned int s:1;
-	unsigned int t:1;
-
-	unsigned int h:1;
-	unsigned int l:1;
-	unsigned int n:1;
-	unsigned int x:1;
-	unsigned int b:1;
-	unsigned int c:1;
-	unsigned int v:1;
-	unsigned int m:1;
-
-	unsigned int cb:8;
-
-	unsigned int o:1;
-	unsigned int g:1;
-	unsigned int f:1;
-	unsigned int r:1;
-	unsigned int q:1;
-	unsigned int p:1;
-	unsigned int d:1;
-	unsigned int i:1;
-};
-
-#ifdef CONFIG_64BIT
-#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
-#else
-#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
-#endif
-
-struct task_struct;
-
-extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);
-
-#define switch_to(prev, next, last) do {			\
-	(last) = _switch_to(prev, next);			\
-} while(0)
-
-#define mfctl(reg)	({		\
-	unsigned long cr;		\
-	__asm__ __volatile__(		\
-		"mfctl " #reg ",%0" :	\
-		 "=r" (cr)		\
-	);				\
-	cr;				\
-})
-
-#define mtctl(gr, cr) \
-	__asm__ __volatile__("mtctl %0,%1" \
-		: /* no outputs */ \
-		: "r" (gr), "i" (cr) : "memory")
-
-/* these are here to de-mystefy the calling code, and to provide hooks */
-/* which I needed for debugging EIEM problems -PB */
-#define get_eiem() mfctl(15)
-static inline void set_eiem(unsigned long val)
-{
-	mtctl(val, 15);
-}
-
-#define mfsp(reg)	({		\
-	unsigned long cr;		\
-	__asm__ __volatile__(		\
-		"mfsp " #reg ",%0" :	\
-		 "=r" (cr)		\
-	);				\
-	cr;				\
-})
-
-#define mtsp(gr, cr) \
-	__asm__ __volatile__("mtsp %0,%1" \
-		: /* no outputs */ \
-		: "r" (gr), "i" (cr) : "memory")
-
-
-/*
-** This is simply the barrier() macro from linux/kernel.h but when serial.c
-** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
-** hasn't yet been included yet so it fails, thus repeating the macro here.
-**
-** PA-RISC architecture allows for weakly ordered memory accesses although
-** none of the processors use it. There is a strong ordered bit that is
-** set in the O-bit of the page directory entry. Operating systems that
-** can not tolerate out of order accesses should set this bit when mapping
-** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
-** of the processor implemented the PSW O-bit). The PCX-W ERS states that
-** the TLB O-bit is not implemented so the page directory does not need to
-** have the O-bit set when mapping pages (section 3.1). This section also
-** states that the PSW Y, Z, G, and O bits are not implemented.
-** So it looks like nothing needs to be done for parisc-linux (yet).
-** (thanks to chada for the above comment -ggg)
-**
-** The __asm__ op below simple prevents gcc/ld from reordering
-** instructions across the mb() "call".
-*/
-#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
-#define rmb()		mb()
-#define wmb()		mb()
-#define smp_mb()	mb()
-#define smp_rmb()	mb()
-#define smp_wmb()	mb()
-#define smp_read_barrier_depends()	do { } while(0)
-#define read_barrier_depends()		do { } while(0)
-
-#define set_mb(var, value)		do { var = value; mb(); } while (0)
-
-#ifndef CONFIG_PA20
-/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
-   and GCC only guarantees 8-byte alignment for stack locals, we can't
-   be assured of 16-byte alignment for atomic lock data even if we
-   specify "__attribute ((aligned(16)))" in the type declaration.  So,
-   we use a struct containing an array of four ints for the atomic lock
-   type and dynamically select the 16-byte aligned int from the array
-   for the semaphore.  */
-
-#define __PA_LDCW_ALIGNMENT	16
-#define __ldcw_align(a) ({					\
-	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
-	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
-		& ~(__PA_LDCW_ALIGNMENT - 1);			\
-	(volatile unsigned int *) __ret;			\
-})
-#define __LDCW	"ldcw"
-
-#else /*CONFIG_PA20*/
-/* From: "Jim Hull" <jim.hull of hp.com>
-   I've attached a summary of the change, but basically, for PA 2.0, as
-   long as the ",CO" (coherent operation) completer is specified, then the
-   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
-   they only require "natural" alignment (4-byte for ldcw, 8-byte for
-   ldcd). */
-
-#define __PA_LDCW_ALIGNMENT	4
-#define __ldcw_align(a) (&(a)->slock)
-#define __LDCW	"ldcw,co"
-
-#endif /*!CONFIG_PA20*/
-
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({						\
-	unsigned __ret;						\
-	__asm__ __volatile__(__LDCW " 0(%2),%0"			\
-		: "=r" (__ret), "+m" (*(a)) : "r" (a));		\
-	__ret;							\
-})
-
-#ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
-#endif
-
-#define arch_align_stack(x) (x)
-
-#endif
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 6d9c7c7973d..83ae7dd4d99 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -5,6 +5,7 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/processor.h>
+#include <asm/special_insns.h>
 
 struct thread_info {
 	struct task_struct *task;	/* main task structure */
diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h
index 3b68d77273d..2bd51f6d832 100644
--- a/arch/parisc/include/asm/timex.h
+++ b/arch/parisc/include/asm/timex.h
@@ -6,7 +6,6 @@
 #ifndef _ASMPARISC_TIMEX_H
 #define _ASMPARISC_TIMEX_H
 
-#include <asm/system.h>
 
 #define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
 
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index ff4cf9dab8d..9ac066086f0 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -5,7 +5,6 @@
  * User space memory access functions
  */
 #include <asm/page.h>
-#include <asm/system.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
 #include <asm-generic/uaccess-unaligned.h>
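Taken together, the patch replaces the catch-all asm/system.h with focused headers. A hypothetical user of the old header would migrate like this, pulling in only what it actually uses:

/* Before:
 *	#include <asm/system.h>
 *
 * After, depending on what the file needs:
 *	#include <asm/barrier.h>	for mb(), smp_mb()
 *	#include <asm/cmpxchg.h>	for xchg(), cmpxchg()
 *	#include <asm/ldcw.h>		for __ldcw(), __ldcw_align()
 *	#include <asm/special_insns.h>	for mfctl(), mtctl()
 *	#include <asm/switch_to.h>	for switch_to()
 *	#include <asm/exec.h>		for arch_align_stack()
 */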