Diffstat (limited to 'arch/s390/include')
-rw-r--r--   arch/s390/include/asm/atomic.h     |  28
-rw-r--r--   arch/s390/include/asm/bitops.h     |  65
-rw-r--r--   arch/s390/include/asm/cache.h      |   1
-rw-r--r--   arch/s390/include/asm/cacheflush.h |   4
-rw-r--r--   arch/s390/include/asm/ccwdev.h     |   4
-rw-r--r--   arch/s390/include/asm/ccwgroup.h   |   4
-rw-r--r--   arch/s390/include/asm/cio.h        |   2
-rw-r--r--   arch/s390/include/asm/cmpxchg.h    | 225
-rw-r--r--   arch/s390/include/asm/futex.h      |  12
-rw-r--r--   arch/s390/include/asm/rwsem.h      |  63
-rw-r--r--   arch/s390/include/asm/system.h     | 196
-rw-r--r--   arch/s390/include/asm/types.h      |   8
-rw-r--r--   arch/s390/include/asm/uaccess.h    |   4
-rw-r--r--   arch/s390/include/asm/unistd.h     |   6
14 files changed, 310 insertions(+), 312 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea11718..d9db13810d1 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -9,7 +9,7 @@
  *
  * Atomic operations that C can't guarantee us.
  * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP enviroment.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
  *
  */
@@ -36,14 +36,19 @@
 
 static inline int atomic_read(const atomic_t *v)
 {
-        barrier();
-        return v->counter;
+        int c;
+
+        asm volatile(
+                "       l       %0,%1\n"
+                : "=d" (c) : "Q" (v->counter));
+        return c;
 }
 
 static inline void atomic_set(atomic_t *v, int i)
 {
-        v->counter = i;
-        barrier();
+        asm volatile(
+                "       st      %1,%0\n"
+                : "=Q" (v->counter) : "d" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 static inline long long atomic64_read(const atomic64_t *v)
 {
-        barrier();
-        return v->counter;
+        long long c;
+
+        asm volatile(
+                "       lg      %0,%1\n"
+                : "=d" (c) : "Q" (v->counter));
+        return c;
 }
 
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
-        v->counter = i;
-        barrier();
+        asm volatile(
+                "       stg     %1,%0\n"
+                : "=Q" (v->counter) : "d" (i));
 }
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
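
The atomic.h hunks above replace barrier() plus a plain C access with an explicit single-instruction load or store, so the compiler can neither tear the access nor cache the value in a register. As a rough user-space analogue (an illustration only, not the kernel's implementation), C11 relaxed atomics give the same single-access guarantee:

    /* Minimal sketch of the atomic_read()/atomic_set() contract. */
    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
            atomic_int v;

            /* like atomic_set(): one untearable store */
            atomic_store_explicit(&v, 42, memory_order_relaxed);
            /* like atomic_read(): one untearable load */
            printf("%d\n", atomic_load_explicit(&v, memory_order_relaxed));
            return 0;
    }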
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 2e05972c508..e1c8f3a4988 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -742,18 +742,42 @@ static inline int sched_find_first_bit(unsigned long *b)
  * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
  */
 
-#define ext2_set_bit(nr, addr)       \
-        __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_set_bit_atomic(lock, nr, addr)       \
-        test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr)       \
-        __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr)       \
-        test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_test_bit(nr, addr)      \
-        test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-
-static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
+static inline void __set_bit_le(unsigned long nr, void *addr)
+{
+        __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline void __clear_bit_le(unsigned long nr, void *addr)
+{
+        __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int __test_and_set_bit_le(unsigned long nr, void *addr)
+{
+        return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_and_set_bit_le(unsigned long nr, void *addr)
+{
+        return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int __test_and_clear_bit_le(unsigned long nr, void *addr)
+{
+        return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_and_clear_bit_le(unsigned long nr, void *addr)
+{
+        return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_bit_le(unsigned long nr, const void *addr)
+{
+        return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
 {
         unsigned long bytes, bits;
 
@@ -764,7 +788,7 @@ static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
         return (bits < size) ? bits : size;
 }
 
-static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
+static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
                                           unsigned long offset)
 {
         unsigned long *addr = vaddr, *p;
@@ -790,11 +814,10 @@ static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
                 size -= __BITOPS_WORDSIZE;
                 p++;
         }
-        return offset + ext2_find_first_zero_bit(p, size);
+        return offset + find_first_zero_bit_le(p, size);
 }
 
-static inline unsigned long ext2_find_first_bit(void *vaddr,
-                                                unsigned long size)
+static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
 {
         unsigned long bytes, bits;
 
@@ -805,7 +828,7 @@ static inline unsigned long ext2_find_first_bit(void *vaddr,
         return (bits < size) ? bits : size;
 }
 
-static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
+static inline int find_next_bit_le(void *vaddr, unsigned long size,
                                      unsigned long offset)
 {
         unsigned long *addr = vaddr, *p;
@@ -831,10 +854,14 @@ static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
                 size -= __BITOPS_WORDSIZE;
                 p++;
         }
-        return offset + ext2_find_first_bit(p, size);
+        return offset + find_first_bit_le(p, size);
 }
 
-#include <asm-generic/bitops/minix.h>
+#define ext2_set_bit_atomic(lock, nr, addr)     \
+        test_and_set_bit_le(nr, addr)
+#define ext2_clear_bit_atomic(lock, nr, addr)   \
+        test_and_clear_bit_le(nr, addr)
+
 
 #endif /* __KERNEL__ */
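
All of the *_le helpers above rely on the same nr ^ (__BITOPS_WORDSIZE - 8) remapping: XOR-ing the bit number reverses the byte index within the word while keeping the bit position inside the byte, which is how a big-endian machine addresses a little-endian bitmap. A standalone sketch, assuming the 64-bit case where __BITOPS_WORDSIZE is 64 and the XOR constant is therefore 56:

    #include <stdio.h>

    int main(void)
    {
            unsigned long nr;

            /* little-endian bit nr lives in byte nr/8; nr ^ 56 maps
             * byte b to byte 7 - b without touching the low 3 bits */
            for (nr = 0; nr < 64; nr += 8)
                    printf("le bit %2lu (byte %lu) -> be bit %2lu (byte %lu)\n",
                           nr, nr / 8, nr ^ 56, (nr ^ 56) / 8);
            return 0;
    }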
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa68b64..2a30d5ac066 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@
 
 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8
+#define NET_SKB_PAD        32
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 7e1f7762062..43a5c78046d 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -8,4 +8,8 @@
 void kernel_map_pages(struct page *page, int numpages, int enable);
 #endif
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
 #endif /* _S390_CACHEFLUSH_H */
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index ff6f62e0ec3..623f2fb7177 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -112,7 +112,6 @@ enum uc_todo {
 
 /**
  * struct ccw driver - device driver for channel attached devices
- * @owner: owning module
  * @ids: ids supported by this driver
  * @probe: function called on probe
  * @remove: function called on remove
@@ -128,10 +127,8 @@ enum uc_todo {
  * @restore: callback for restoring after hibernation
  * @uc_handler: callback for unit check handler
  * @driver: embedded device driver structure
- * @name: device driver name
  */
 struct ccw_driver {
-        struct module *owner;
         struct ccw_device_id *ids;
         int (*probe) (struct ccw_device *);
         void (*remove) (struct ccw_device *);
@@ -147,7 +144,6 @@ struct ccw_driver {
         int (*restore)(struct ccw_device *);
         enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
         struct device_driver driver;
-        char *name;
 };
 
 extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index c79c1e787b8..f2ea2c56a7e 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -29,8 +29,6 @@ struct ccwgroup_device {
 
 /**
  * struct ccwgroup_driver - driver for ccw group devices
- * @owner: driver owner
- * @name: driver name
  * @max_slaves: maximum number of slave devices
  * @driver_id: unique id
  * @probe: function called on probe
@@ -46,8 +44,6 @@ struct ccwgroup_device {
  * @driver: embedded driver structure
  */
 struct ccwgroup_driver {
-        struct module *owner;
-        char *name;
         int max_slaves;
         unsigned long driver_id;
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index e34347d567a..fc50a3342da 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -183,7 +183,7 @@ struct esw3 {
  * The irb that is handed to the device driver when an interrupt occurs. For
  * solicited interrupts, the common I/O layer already performs checks whether
  * a field is valid; a field not being valid is always passed as %0.
- * If a unit check occured, @ecw may contain sense data; this is retrieved
+ * If a unit check occurred, @ecw may contain sense data; this is retrieved
  * by the common I/O layer itself if the device doesn't support concurrent
  * sense (so that the device driver never needs to perform basic sene itself).
  * For unsolicited interrupts, the irb is passed as-is (expect for sense data,
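
The cacheflush.h hunk above exports set_memory_ro()/set_memory_rw()/set_memory_nx() for s390. A hypothetical module fragment (not part of this patch) showing the intended usage pattern, flipping one page-aligned allocation read-only and back:

    #include <linux/module.h>
    #include <linux/gfp.h>
    #include <asm/cacheflush.h>

    static unsigned long page;

    static int __init ro_demo_init(void)
    {
            page = __get_free_page(GFP_KERNEL);
            if (!page)
                    return -ENOMEM;
            set_memory_ro(page, 1);         /* writes to the page now fault */
            set_memory_rw(page, 1);         /* page is writable again */
            return 0;
    }

    static void __exit ro_demo_exit(void)
    {
            free_page(page);
    }

    module_init(ro_demo_init);
    module_exit(ro_demo_exit);
    MODULE_LICENSE("GPL");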
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
new file mode 100644
index 00000000000..7488e52efa9
--- /dev/null
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright IBM Corp. 1999, 2011
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ */
+
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/types.h>
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
+{
+        unsigned long addr, old;
+        int shift;
+
+        switch (size) {
+        case 1:
+                addr = (unsigned long) ptr;
+                shift = (3 ^ (addr & 3)) << 3;
+                addr ^= addr & 3;
+                asm volatile(
+                        "       l       %0,%4\n"
+                        "0:     lr      0,%0\n"
+                        "       nr      0,%3\n"
+                        "       or      0,%2\n"
+                        "       cs      %0,0,%4\n"
+                        "       jl      0b\n"
+                        : "=&d" (old), "=Q" (*(int *) addr)
+                        : "d" (x << shift), "d" (~(255 << shift)),
+                          "Q" (*(int *) addr) : "memory", "cc", "0");
+                return old >> shift;
+        case 2:
+                addr = (unsigned long) ptr;
+                shift = (2 ^ (addr & 2)) << 3;
+                addr ^= addr & 2;
+                asm volatile(
+                        "       l       %0,%4\n"
+                        "0:     lr      0,%0\n"
+                        "       nr      0,%3\n"
+                        "       or      0,%2\n"
+                        "       cs      %0,0,%4\n"
+                        "       jl      0b\n"
+                        : "=&d" (old), "=Q" (*(int *) addr)
+                        : "d" (x << shift), "d" (~(65535 << shift)),
+                          "Q" (*(int *) addr) : "memory", "cc", "0");
+                return old >> shift;
+        case 4:
+                asm volatile(
+                        "       l       %0,%3\n"
+                        "0:     cs      %0,%2,%3\n"
+                        "       jl      0b\n"
+                        : "=&d" (old), "=Q" (*(int *) ptr)
+                        : "d" (x), "Q" (*(int *) ptr)
+                        : "memory", "cc");
+                return old;
+#ifdef CONFIG_64BIT
+        case 8:
+                asm volatile(
+                        "       lg      %0,%3\n"
+                        "0:     csg     %0,%2,%3\n"
+                        "       jl      0b\n"
+                        : "=&d" (old), "=m" (*(long *) ptr)
+                        : "d" (x), "Q" (*(long *) ptr)
+                        : "memory", "cc");
+                return old;
+#endif /* CONFIG_64BIT */
+        }
+        __xchg_called_with_bad_pointer();
+        return x;
+}
+
+#define xchg(ptr, x)                                            \
+({                                                              \
+        __typeof__(*(ptr)) __ret;                               \
+        __ret = (__typeof__(*(ptr)))                            \
+                __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
+        __ret;                                                  \
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
+                                      unsigned long new, int size)
+{
+        unsigned long addr, prev, tmp;
+        int shift;
+
+        switch (size) {
+        case 1:
+                addr = (unsigned long) ptr;
+                shift = (3 ^ (addr & 3)) << 3;
+                addr ^= addr & 3;
+                asm volatile(
+                        "       l       %0,%2\n"
+                        "0:     nr      %0,%5\n"
+                        "       lr      %1,%0\n"
+                        "       or      %0,%3\n"
+                        "       or      %1,%4\n"
+                        "       cs      %0,%1,%2\n"
+                        "       jnl     1f\n"
+                        "       xr      %1,%0\n"
+                        "       nr      %1,%5\n"
+                        "       jnz     0b\n"
+                        "1:"
+                        : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+                        : "d" (old << shift), "d" (new << shift),
+                          "d" (~(255 << shift)), "Q" (*(int *) ptr)
+                        : "memory", "cc");
+                return prev >> shift;
+        case 2:
+                addr = (unsigned long) ptr;
+                shift = (2 ^ (addr & 2)) << 3;
+                addr ^= addr & 2;
+                asm volatile(
+                        "       l       %0,%2\n"
+                        "0:     nr      %0,%5\n"
+                        "       lr      %1,%0\n"
+                        "       or      %0,%3\n"
+                        "       or      %1,%4\n"
+                        "       cs      %0,%1,%2\n"
+                        "       jnl     1f\n"
+                        "       xr      %1,%0\n"
+                        "       nr      %1,%5\n"
+                        "       jnz     0b\n"
+                        "1:"
+                        : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+                        : "d" (old << shift), "d" (new << shift),
+                          "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+                        : "memory", "cc");
+                return prev >> shift;
+        case 4:
+                asm volatile(
+                        "       cs      %0,%3,%1\n"
+                        : "=&d" (prev), "=Q" (*(int *) ptr)
+                        : "0" (old), "d" (new), "Q" (*(int *) ptr)
+                        : "memory", "cc");
+                return prev;
+#ifdef CONFIG_64BIT
+        case 8:
+                asm volatile(
+                        "       csg     %0,%3,%1\n"
+                        : "=&d" (prev), "=Q" (*(long *) ptr)
+                        : "0" (old), "d" (new), "Q" (*(long *) ptr)
+                        : "memory", "cc");
+                return prev;
+#endif /* CONFIG_64BIT */
+        }
+        __cmpxchg_called_with_bad_pointer();
+        return old;
+}
+
+#define cmpxchg(ptr, o, n)                                      \
+        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+                                       (unsigned long)(n), sizeof(*(ptr))))
+
+#ifdef CONFIG_64BIT
+#define cmpxchg64(ptr, o, n)                                    \
+({                                                              \
+        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                      \
+        cmpxchg((ptr), (o), (n));                               \
+})
+#else /* CONFIG_64BIT */
+static inline unsigned long long __cmpxchg64(void *ptr,
+                                             unsigned long long old,
+                                             unsigned long long new)
+{
+        register_pair rp_old = {.pair = old};
+        register_pair rp_new = {.pair = new};
+
+        asm volatile(
+                "       cds     %0,%2,%1"
+                : "+&d" (rp_old), "=Q" (ptr)
+                : "d" (rp_new), "Q" (ptr)
+                : "cc");
+        return rp_old.pair;
+}
+#define cmpxchg64(ptr, o, n)                                    \
+        ((__typeof__(*(ptr)))__cmpxchg64((ptr),                 \
+                                         (unsigned long long)(o), \
+                                         (unsigned long long)(n)))
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(void *ptr,
+                                            unsigned long old,
+                                            unsigned long new, int size)
+{
+        switch (size) {
+        case 1:
+        case 2:
+        case 4:
+#ifdef CONFIG_64BIT
+        case 8:
+#endif
+                return __cmpxchg(ptr, old, new, size);
+        default:
+                return __cmpxchg_local_generic(ptr, old, new, size);
+        }
+
+        return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)                                \
+        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+                                             (unsigned long)(n), sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n)      cmpxchg64((ptr), (o), (n))
+
+#endif /* __ASM_CMPXCHG_H */
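
The contract documented in the new header is worth restating: cmpxchg() returns the previous value of *ptr, and the store happened if and only if that value equals the expected old value (the 1- and 2-byte cases emulate this with a mask-and-retry loop on the aligned word containing the target, since CS operates only on words). A user-space sketch of the same contract built on a GCC builtin, not the kernel code above:

    #include <stdio.h>

    /* returns the previous value of *ptr; the swap took effect iff
     * the return value equals the expected old value */
    static unsigned long cmpxchg_demo(unsigned long *ptr, unsigned long old,
                                      unsigned long new)
    {
            __atomic_compare_exchange_n(ptr, &old, new, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            return old;     /* on failure, updated to the value observed */
    }

    int main(void)
    {
            unsigned long v = 5, seen;

            /* atomically add 3: retry until no other writer intervenes */
            do {
                    seen = v;
            } while (cmpxchg_demo(&v, seen, seen + 3) != seen);
            printf("%lu\n", v);     /* prints 8 */
            return 0;
    }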
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 5c5d02de49e..81cf36b691f 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -7,7 +7,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
-                                                int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                                u32 oldval, u32 newval)
 {
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
-        return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
+        return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
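
The new signature separates the two things the old one conflated: the previous futex value now comes back through *uval, while the return code carries only success or -EFAULT, so a legitimate user value can no longer be mistaken for a fault. A hypothetical kernel-side caller sketch (types as in the header above, not code from this patch):

    static int futex_cmpxchg_demo(u32 __user *uaddr, u32 expected, u32 new)
    {
            u32 cur;
            int ret;

            ret = futex_atomic_cmpxchg_inatomic(&cur, uaddr, expected, new);
            if (ret)
                    return ret;             /* fault on the user address */
            return cur == expected;         /* 1 if the update was performed */
    }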
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 423fdda2322..d0eb4653ceb 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -43,29 +43,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-        signed long             count;
-        spinlock_t              wait_lock;
-        struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-        struct lockdep_map      dep_map;
-#endif
-};
-
 #ifndef __s390x__
 #define RWSEM_UNLOCKED_VALUE    0x00000000
 #define RWSEM_ACTIVE_BIAS       0x00000001
@@ -81,41 +58,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 /*
- * initialisation
- */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
-   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-        sem->count = RWSEM_UNLOCKED_VALUE;
-        spin_lock_init(&sem->wait_lock);
-        INIT_LIST_HEAD(&sem->wait_list);
-}
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                         struct lock_class_key *key);
-
-#define init_rwsem(sem)                         \
-do {                                            \
-        static struct lock_class_key __key;     \
-                                                \
-        __init_rwsem((sem), #sem, &__key);      \
-} while (0)
-
-
-/*
  * lock for reading
  */
 static inline void __down_read(struct rw_semaphore *sem)
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
         return new;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-        return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
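
What remains in the header after this cleanup is only the bias arithmetic. A standalone sketch of how the count field encodes the lock state, assuming the 64-bit constants this header uses (each reader adds ACTIVE_BIAS, a writer adds WAITING_BIAS + ACTIVE_BIAS, so a negative count implies a writer and a nonzero count means "locked"):

    #include <stdio.h>

    #define RWSEM_ACTIVE_BIAS       0x0000000000000001L
    #define RWSEM_WAITING_BIAS      (-0x0000000100000000L)
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
            long count = 0;                 /* RWSEM_UNLOCKED_VALUE */

            count += RWSEM_ACTIVE_BIAS;     /* first reader */
            count += RWSEM_ACTIVE_BIAS;     /* second reader */
            printf("two readers: %ld (locked: %d)\n", count, count != 0);

            count = RWSEM_ACTIVE_WRITE_BIAS;        /* exclusive writer */
            printf("writer: %ld (negative => writer present)\n", count);
            return 0;
    }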
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 8f8d759f6a7..d382629a017 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -14,6 +14,7 @@
 #include <asm/setup.h>
 #include <asm/processor.h>
 #include <asm/lowcore.h>
+#include <asm/cmpxchg.h>
 
 #ifdef __KERNEL__
 
@@ -120,161 +121,6 @@ extern int memcpy_real(void *, void *, size_t);
 
 #define nop() asm volatile("nop")
 
-#define xchg(ptr,x)                                             \
-({                                                              \
-        __typeof__(*(ptr)) __ret;                               \
-        __ret = (__typeof__(*(ptr)))                            \
-                __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
-        __ret;                                                  \
-})
-
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
-{
-        unsigned long addr, old;
-        int shift;
-
-        switch (size) {
-        case 1:
-                addr = (unsigned long) ptr;
-                shift = (3 ^ (addr & 3)) << 3;
-                addr ^= addr & 3;
-                asm volatile(
-                        "       l       %0,%4\n"
-                        "0:     lr      0,%0\n"
-                        "       nr      0,%3\n"
-                        "       or      0,%2\n"
-                        "       cs      %0,0,%4\n"
-                        "       jl      0b\n"
-                        : "=&d" (old), "=Q" (*(int *) addr)
-                        : "d" (x << shift), "d" (~(255 << shift)),
-                          "Q" (*(int *) addr) : "memory", "cc", "0");
-                return old >> shift;
-        case 2:
-                addr = (unsigned long) ptr;
-                shift = (2 ^ (addr & 2)) << 3;
-                addr ^= addr & 2;
-                asm volatile(
-                        "       l       %0,%4\n"
-                        "0:     lr      0,%0\n"
-                        "       nr      0,%3\n"
-                        "       or      0,%2\n"
-                        "       cs      %0,0,%4\n"
-                        "       jl      0b\n"
-                        : "=&d" (old), "=Q" (*(int *) addr)
-                        : "d" (x << shift), "d" (~(65535 << shift)),
-                          "Q" (*(int *) addr) : "memory", "cc", "0");
-                return old >> shift;
-        case 4:
-                asm volatile(
-                        "       l       %0,%3\n"
-                        "0:     cs      %0,%2,%3\n"
-                        "       jl      0b\n"
-                        : "=&d" (old), "=Q" (*(int *) ptr)
-                        : "d" (x), "Q" (*(int *) ptr)
-                        : "memory", "cc");
-                return old;
-#ifdef __s390x__
-        case 8:
-                asm volatile(
-                        "       lg      %0,%3\n"
-                        "0:     csg     %0,%2,%3\n"
-                        "       jl      0b\n"
-                        : "=&d" (old), "=m" (*(long *) ptr)
-                        : "d" (x), "Q" (*(long *) ptr)
-                        : "memory", "cc");
-                return old;
-#endif /* __s390x__ */
-        }
-        __xchg_called_with_bad_pointer();
-        return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-#define cmpxchg(ptr, o, n)                                      \
-        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-                                       (unsigned long)(n), sizeof(*(ptr))))
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-        unsigned long addr, prev, tmp;
-        int shift;
-
-        switch (size) {
-        case 1:
-                addr = (unsigned long) ptr;
-                shift = (3 ^ (addr & 3)) << 3;
-                addr ^= addr & 3;
-                asm volatile(
-                        "       l       %0,%2\n"
-                        "0:     nr      %0,%5\n"
-                        "       lr      %1,%0\n"
-                        "       or      %0,%3\n"
-                        "       or      %1,%4\n"
-                        "       cs      %0,%1,%2\n"
-                        "       jnl     1f\n"
-                        "       xr      %1,%0\n"
-                        "       nr      %1,%5\n"
-                        "       jnz     0b\n"
-                        "1:"
-                        : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-                        : "d" (old << shift), "d" (new << shift),
-                          "d" (~(255 << shift)), "Q" (*(int *) ptr)
-                        : "memory", "cc");
-                return prev >> shift;
-        case 2:
-                addr = (unsigned long) ptr;
-                shift = (2 ^ (addr & 2)) << 3;
-                addr ^= addr & 2;
-                asm volatile(
-                        "       l       %0,%2\n"
-                        "0:     nr      %0,%5\n"
-                        "       lr      %1,%0\n"
-                        "       or      %0,%3\n"
-                        "       or      %1,%4\n"
-                        "       cs      %0,%1,%2\n"
-                        "       jnl     1f\n"
-                        "       xr      %1,%0\n"
-                        "       nr      %1,%5\n"
-                        "       jnz     0b\n"
-                        "1:"
-                        : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-                        : "d" (old << shift), "d" (new << shift),
-                          "d" (~(65535 << shift)), "Q" (*(int *) ptr)
-                        : "memory", "cc");
-                return prev >> shift;
-        case 4:
-                asm volatile(
-                        "       cs      %0,%3,%1\n"
-                        : "=&d" (prev), "=Q" (*(int *) ptr)
-                        : "0" (old), "d" (new), "Q" (*(int *) ptr)
-                        : "memory", "cc");
-                return prev;
-#ifdef __s390x__
-        case 8:
-                asm volatile(
-                        "       csg     %0,%3,%1\n"
-                        : "=&d" (prev), "=Q" (*(long *) ptr)
-                        : "0" (old), "d" (new), "Q" (*(long *) ptr)
-                        : "memory", "cc");
-                return prev;
-#endif /* __s390x__ */
-        }
-        __cmpxchg_called_with_bad_pointer();
-        return old;
-}
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -353,46 +199,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
         __ctl_load(__dummy, cr, cr);    \
 })
 
-#include <linux/irqflags.h>
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-                                            unsigned long old,
-                                            unsigned long new, int size)
-{
-        switch (size) {
-        case 1:
-        case 2:
-        case 4:
-#ifdef __s390x__
-        case 8:
-#endif
-                return __cmpxchg(ptr, old, new, size);
-        default:
-                return __cmpxchg_local_generic(ptr, old, new, size);
-        }
-
-        return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)                                \
-        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-                        (unsigned long)(n), sizeof(*(ptr))))
-#ifdef __s390x__
-#define cmpxchg64_local(ptr, o, n)                              \
-({                                                              \
-        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                      \
-        cmpxchg_local((ptr), (o), (n));                         \
-})
-#else
-#define cmpxchg64_local(ptr, o, n)      __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
 /*
  * Use to set psw mask except for the first byte which
  * won't be changed by this function.
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 04d6b95a89c..eeb52ccf499 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -30,14 +30,6 @@ typedef __signed__ long saddr_t;
 
 #ifndef __ASSEMBLY__
 
-typedef u64 dma64_addr_t;
-#ifdef __s390x__
-/* DMA addresses come in 32-bit and 64-bit flavours. */
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif
-
 #ifndef __s390x__
 typedef union {
         unsigned long long pair;
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index d6b1ed0ec52..2d9ea11f919 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -83,8 +83,8 @@ struct uaccess_ops {
         size_t (*clear_user)(size_t, void __user *);
         size_t (*strnlen_user)(size_t, const char __user *);
         size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-        int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-        int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+        int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
+        int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
 };
 
 extern struct uaccess_ops uaccess;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 1049ef27c15..e8215257237 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -272,7 +272,11 @@
 #define __NR_fanotify_init      332
 #define __NR_fanotify_mark      333
 #define __NR_prlimit64          334
-#define NR_syscalls 335
+#define __NR_name_to_handle_at  335
+#define __NR_open_by_handle_at  336
+#define __NR_clock_adjtime      337
+#define __NR_syncfs             338
+#define NR_syscalls 339
 
 /*
  * There are some system calls that are not present on 64 bit, some
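
The unistd.h hunk wires up four new syscall numbers for s390. A user-space sketch of exercising one of them; the literal 338 is the s390 value of __NR_syncfs from this patch, so on any other architecture (or with current headers) real code should use the __NR_syncfs macro instead:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/syscall.h>

    int main(void)
    {
            int fd = open(".", O_RDONLY);
            long ret;

            if (fd < 0)
                    return 1;
            /* sync the filesystem containing fd */
            ret = syscall(338 /* __NR_syncfs on s390 */, fd);
            printf("syncfs returned %ld\n", ret);
            close(fd);
            return 0;
    }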