Diffstat (limited to 'include/asm-s390')
-rw-r--r--   include/asm-s390/atomic.h        | 176
-rw-r--r--   include/asm-s390/bitops.h        |   1
-rw-r--r--   include/asm-s390/cache.h         |   1
-rw-r--r--   include/asm-s390/ccwdev.h        |   3
-rw-r--r--   include/asm-s390/elf.h           |   2
-rw-r--r--   include/asm-s390/futex.h         |  49
-rw-r--r--   include/asm-s390/ioctl.h         |  89
-rw-r--r--   include/asm-s390/kexec.h         |   5
-rw-r--r--   include/asm-s390/mman.h          |   1
-rw-r--r--   include/asm-s390/mutex.h         |   9
-rw-r--r--   include/asm-s390/processor.h     |   8
-rw-r--r--   include/asm-s390/qdio.h          |   8
-rw-r--r--   include/asm-s390/s390_rdev.h     |  15
-rw-r--r--   include/asm-s390/sigcontext.h    |   2
-rw-r--r--   include/asm-s390/system.h        |  15
-rw-r--r--   include/asm-s390/thread_info.h   |   2
-rw-r--r--   include/asm-s390/uaccess.h       |  14
-rw-r--r--   include/asm-s390/unistd.h        |   2
-rw-r--r--   include/asm-s390/vtoc.h          |  24
19 files changed, 162 insertions(+), 264 deletions(-)
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h index b3bd4f679f7..be6fefe223d 100644 --- a/include/asm-s390/atomic.h +++ b/include/asm-s390/atomic.h @@ -5,7 +5,7 @@ * include/asm-s390/atomic.h * * S390 version - * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Denis Joseph Barrow, * Arnd Bergmann (arndb@de.ibm.com) @@ -45,59 +45,59 @@ typedef struct { #define atomic_read(v) ((v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) -static __inline__ void atomic_add(int i, atomic_t * v) -{ - __CS_LOOP(v, i, "ar"); -} static __inline__ int atomic_add_return(int i, atomic_t * v) { return __CS_LOOP(v, i, "ar"); } -static __inline__ int atomic_add_negative(int i, atomic_t * v) -{ - return __CS_LOOP(v, i, "ar") < 0; -} -static __inline__ void atomic_sub(int i, atomic_t * v) -{ - __CS_LOOP(v, i, "sr"); -} +#define atomic_add(_i, _v) atomic_add_return(_i, _v) +#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) +#define atomic_inc(_v) atomic_add_return(1, _v) +#define atomic_inc_return(_v) atomic_add_return(1, _v) +#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) + static __inline__ int atomic_sub_return(int i, atomic_t * v) { return __CS_LOOP(v, i, "sr"); } -static __inline__ void atomic_inc(volatile atomic_t * v) -{ - __CS_LOOP(v, 1, "ar"); -} -static __inline__ int atomic_inc_return(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "ar"); -} +#define atomic_sub(_i, _v) atomic_sub_return(_i, _v) +#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0) +#define atomic_dec(_v) atomic_sub_return(1, _v) +#define atomic_dec_return(_v) atomic_sub_return(1, _v) +#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) -static __inline__ int atomic_inc_and_test(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "ar") == 0; -} -static __inline__ void atomic_dec(volatile atomic_t * v) -{ - __CS_LOOP(v, 1, "sr"); -} -static __inline__ int atomic_dec_return(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "sr"); -} -static __inline__ int atomic_dec_and_test(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "sr") == 0; -} static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v) { __CS_LOOP(v, ~mask, "nr"); } + static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) { __CS_LOOP(v, mask, "or"); } + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + __asm__ __volatile__(" cs %0,%3,0(%2)\n" + : "+d" (old), "=m" (v->counter) + : "a" (v), "d" (new), "m" (v->counter) + : "cc", "memory" ); + return old; +} + +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + c = old; + return c != u; +} + +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #undef __CS_LOOP #ifdef __s390x__ @@ -123,97 +123,67 @@ typedef struct { #define atomic64_read(v) ((v)->counter) #define atomic64_set(v,i) (((v)->counter) = (i)) -static __inline__ void atomic64_add(long long i, atomic64_t * v) -{ - __CSG_LOOP(v, i, "agr"); -} static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "agr"); } -static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v) -{ - return __CSG_LOOP(v, i, "agr") < 0; -} 
-static __inline__ void atomic64_sub(long long i, atomic64_t * v) -{ - __CSG_LOOP(v, i, "sgr"); -} -static __inline__ void atomic64_inc(volatile atomic64_t * v) -{ - __CSG_LOOP(v, 1, "agr"); -} -static __inline__ long long atomic64_inc_return(volatile atomic64_t * v) -{ - return __CSG_LOOP(v, 1, "agr"); -} -static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v) -{ - return __CSG_LOOP(v, 1, "agr") == 0; -} -static __inline__ void atomic64_dec(volatile atomic64_t * v) -{ - __CSG_LOOP(v, 1, "sgr"); -} -static __inline__ long long atomic64_dec_return(volatile atomic64_t * v) -{ - return __CSG_LOOP(v, 1, "sgr"); -} -static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v) +#define atomic64_add(_i, _v) atomic64_add_return(_i, _v) +#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) +#define atomic64_inc(_v) atomic64_add_return(1, _v) +#define atomic64_inc_return(_v) atomic64_add_return(1, _v) +#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) + +static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) { - return __CSG_LOOP(v, 1, "sgr") == 0; + return __CSG_LOOP(v, i, "sgr"); } +#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) +#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) +#define atomic64_dec(_v) atomic64_sub_return(1, _v) +#define atomic64_dec_return(_v) atomic64_sub_return(1, _v) +#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) + static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) { __CSG_LOOP(v, ~mask, "ngr"); } + static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v) { __CSG_LOOP(v, mask, "ogr"); } -#undef __CSG_LOOP -#endif - -/* - returns 0 if expected_oldval==value in *v ( swap was successful ) - returns 1 if unsuccessful. +static __inline__ long long atomic64_cmpxchg(atomic64_t *v, + long long old, long long new) +{ + __asm__ __volatile__(" csg %0,%3,0(%2)\n" + : "+d" (old), "=m" (v->counter) + : "a" (v), "d" (new), "m" (v->counter) + : "cc", "memory" ); + return old; +} - This is non-portable, use bitops or spinlocks instead! 
-*/ -static __inline__ int -atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v) +static __inline__ int atomic64_add_unless(atomic64_t *v, + long long a, long long u) { - int retval; - - __asm__ __volatile__( - " lr %0,%3\n" - " cs %0,%4,0(%2)\n" - " ipm %0\n" - " srl %0,28\n" - "0:" - : "=&d" (retval), "=m" (v->counter) - : "a" (v), "d" (expected_oldval) , "d" (new_val), - "m" (v->counter) : "cc", "memory" ); - return retval; + long long c, old; + + c = atomic64_read(v); + while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c) + c = old; + return c != u; } -#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter))) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +#undef __CSG_LOOP +#endif #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#include <asm-generic/atomic.h> #endif /* __KERNEL__ */ #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index b07c578b22e..61232760cc3 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h @@ -839,6 +839,7 @@ static inline int sched_find_first_bit(unsigned long *b) * fls: find last bit set. */ #define fls(x) generic_fls(x) +#define fls64(x) generic_fls64(x) /* * hweightN: returns the hamming weight (i.e. the number diff --git a/include/asm-s390/cache.h b/include/asm-s390/cache.h index 29845378b20..e20cdd9074d 100644 --- a/include/asm-s390/cache.h +++ b/include/asm-s390/cache.h @@ -13,7 +13,6 @@ #define L1_CACHE_BYTES 256 #define L1_CACHE_SHIFT 8 -#define L1_CACHE_SHIFT_MAX 8 /* largest L1 which this arch supports */ #define ARCH_KMALLOC_MINALIGN 8 diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h index 3eb231af5d5..12456cb2f88 100644 --- a/include/asm-s390/ccwdev.h +++ b/include/asm-s390/ccwdev.h @@ -185,8 +185,5 @@ extern struct ccw_device *ccw_device_probe_console(void); extern int _ccw_device_get_device_number(struct ccw_device *); extern int _ccw_device_get_subchannel_number(struct ccw_device *); -extern struct device *s390_root_dev_register(const char *); -extern void s390_root_dev_unregister(struct device *); - extern void *ccw_device_get_chp_desc(struct ccw_device *, int); #endif /* _S390_CCWDEV_H_ */ diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h index 372d51cccd5..710646e64f7 100644 --- a/include/asm-s390/elf.h +++ b/include/asm-s390/elf.h @@ -163,7 +163,7 @@ static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs) static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) { - struct pt_regs *ptregs = __KSTK_PTREGS(tsk); + struct pt_regs *ptregs = task_pt_regs(tsk); memcpy(®s->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs)); memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs)); regs->orig_gpr2 = ptregs->orig_gpr2; diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h index 9feff4ce142..6a332a9f099 100644 --- a/include/asm-s390/futex.h +++ b/include/asm-s390/futex.h @@ -1,53 +1,6 @@ #ifndef _ASM_FUTEX_H #define _ASM_FUTEX_H -#ifdef __KERNEL__ +#include <asm-generic/futex.h> -#include <linux/futex.h> -#include <asm/errno.h> -#include <asm/uaccess.h> - -static 
inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) -{ - int op = (encoded_op >> 28) & 7; - int cmp = (encoded_op >> 24) & 15; - int oparg = (encoded_op << 8) >> 20; - int cmparg = (encoded_op << 20) >> 20; - int oldval = 0, ret; - if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) - oparg = 1 << oparg; - - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - inc_preempt_count(); - - switch (op) { - case FUTEX_OP_SET: - case FUTEX_OP_ADD: - case FUTEX_OP_OR: - case FUTEX_OP_ANDN: - case FUTEX_OP_XOR: - default: - ret = -ENOSYS; - } - - dec_preempt_count(); - - if (!ret) { - switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; - } - } - return ret; -} - -#endif #endif diff --git a/include/asm-s390/ioctl.h b/include/asm-s390/ioctl.h index df7394345ac..b279fe06dfe 100644 --- a/include/asm-s390/ioctl.h +++ b/include/asm-s390/ioctl.h @@ -1,88 +1 @@ -/* - * include/asm-s390/ioctl.h - * - * S390 version - * - * Derived from "include/asm-i386/ioctl.h" - */ - -#ifndef _S390_IOCTL_H -#define _S390_IOCTL_H - -/* ioctl command encoding: 32 bits total, command in lower 16 bits, - * size of the parameter structure in the lower 14 bits of the - * upper 16 bits. - * Encoding the size of the parameter structure in the ioctl request - * is useful for catching programs compiled with old versions - * and to avoid overwriting user space outside the user buffer area. - * The highest 2 bits are reserved for indicating the ``access mode''. - * NOTE: This limits the max parameter size to 16kB -1 ! - */ - -/* - * The following is for compatibility across the various Linux - * platforms. The i386 ioctl numbering scheme doesn't really enforce - * a type field. De facto, however, the top 8 bits of the lower 16 - * bits are indeed used as a type field, so we might just as well make - * this explicit here. Please be sure to use the decoding macros - * below from now on. - */ -#define _IOC_NRBITS 8 -#define _IOC_TYPEBITS 8 -#define _IOC_SIZEBITS 14 -#define _IOC_DIRBITS 2 - -#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) -#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) -#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) -#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) - -#define _IOC_NRSHIFT 0 -#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) -#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) -#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) - -/* - * Direction bits. - */ -#define _IOC_NONE 0U -#define _IOC_WRITE 1U -#define _IOC_READ 2U - -#define _IOC(dir,type,nr,size) \ - (((dir) << _IOC_DIRSHIFT) | \ - ((type) << _IOC_TYPESHIFT) | \ - ((nr) << _IOC_NRSHIFT) | \ - ((size) << _IOC_SIZESHIFT)) - -/* provoke compile error for invalid uses of size argument */ -extern unsigned long __invalid_size_argument_for_IOC; -#define _IOC_TYPECHECK(t) \ - ((sizeof(t) == sizeof(t[1]) && \ - sizeof(t) < (1 << _IOC_SIZEBITS)) ? 
\ - sizeof(t) : __invalid_size_argument_for_IOC) - -/* used to create numbers */ -#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) -#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) -#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) -#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) -#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) -#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) -#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) - -/* used to decode ioctl numbers.. */ -#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) -#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) -#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) -#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) - -/* ...and for the drivers/sound files... */ - -#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) -#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) -#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) -#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) -#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) - -#endif /* _S390_IOCTL_H */ +#include <asm-generic/ioctl.h> diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h index 54cf7d9f251..ce28ddda0f5 100644 --- a/include/asm-s390/kexec.h +++ b/include/asm-s390/kexec.h @@ -35,8 +35,9 @@ #define KEXEC_ARCH KEXEC_ARCH_S390 #define MAX_NOTE_BYTES 1024 -typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; -extern note_buf_t crash_notes[]; +/* Provide a dummy definition to avoid build failures. */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) { } #endif /*_S390_KEXEC_H */ diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h index ea86bd12204..c8d5409b5d5 100644 --- a/include/asm-s390/mman.h +++ b/include/asm-s390/mman.h @@ -43,6 +43,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-s390/mutex.h b/include/asm-s390/mutex.h new file mode 100644 index 00000000000..458c1f7fbc1 --- /dev/null +++ b/include/asm-s390/mutex.h @@ -0,0 +1,9 @@ +/* + * Pull in the generic implementation for the mutex fastpath. + * + * TODO: implement optimized primitives instead, or leave the generic + * implementation in place, or pick the atomic_xchg() based generic + * implementation. 
(see asm-generic/mutex-xchg.h for details) + */ + +#include <asm-generic/mutex-dec.h> diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 4ec652ebb3b..c5cbc4bd841 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -191,10 +191,10 @@ extern void show_registers(struct pt_regs *regs); extern void show_trace(struct task_struct *task, unsigned long *sp); unsigned long get_wchan(struct task_struct *p); -#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \ - ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs))) -#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr) -#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15]) +#define task_pt_regs(tsk) ((struct pt_regs *) \ + (task_stack_page(tsk) + THREAD_SIZE) - 1) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15]) /* * Give up the time slice of the virtual PU. diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h index 0ddf0a8ef8d..7bc15f0231d 100644 --- a/include/asm-s390/qdio.h +++ b/include/asm-s390/qdio.h @@ -195,12 +195,14 @@ struct qdr { /* * queue information block (QIB) */ -#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80 -#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40 +#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80 +#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40 +#define QIB_RFLAGS_ENABLE_QEBSM 0x80 + struct qib { unsigned int qfmt : 8; /* queue format */ unsigned int pfmt : 8; /* impl. dep. parameter format */ - unsigned int res1 : 8; /* reserved */ + unsigned int rflags : 8; /* QEBSM */ unsigned int ac : 8; /* adapter characteristics */ unsigned int res2; /* reserved */ #ifdef QDIO_32_BIT diff --git a/include/asm-s390/s390_rdev.h b/include/asm-s390/s390_rdev.h new file mode 100644 index 00000000000..6fa20442a48 --- /dev/null +++ b/include/asm-s390/s390_rdev.h @@ -0,0 +1,15 @@ +/* + * include/asm-s390/ccwdev.h + * + * Copyright (C) 2002,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Carsten Otte <cotte@de.ibm.com> + * + * Interface for s390 root device + */ + +#ifndef _S390_RDEV_H_ +#define _S390_RDEV_H_ +extern struct device *s390_root_dev_register(const char *); +extern void s390_root_dev_unregister(struct device *); +#endif /* _S390_RDEV_H_ */ diff --git a/include/asm-s390/sigcontext.h b/include/asm-s390/sigcontext.h index 803545351dd..aeb6e0b1332 100644 --- a/include/asm-s390/sigcontext.h +++ b/include/asm-s390/sigcontext.h @@ -8,6 +8,8 @@ #ifndef _ASM_S390_SIGCONTEXT_H #define _ASM_S390_SIGCONTEXT_H +#include <linux/compiler.h> + #define __NUM_GPRS 16 #define __NUM_FPRS 16 #define __NUM_ACRS 16 diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 864cae7e1fd..b2e65e8bf81 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -104,14 +104,25 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) +/* + * On SMP systems, when the scheduler does migration-cost autodetection, + * it needs a way to flush as much of the CPU's caches as possible. + * + * TODO: fill this in! 
+ */ +static inline void sched_cacheflush(void) +{ +} + #ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void account_user_vtime(struct task_struct *); +extern void account_vtime(struct task_struct *); +extern void account_tick_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *); #endif #define finish_arch_switch(prev) do { \ set_fs(current->thread.mm_segment); \ - account_system_vtime(prev); \ + account_vtime(prev); \ } while (0) #define nop() __asm__ __volatile__ ("nop") diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h index 6c18a3f2431..f3797a52c4e 100644 --- a/include/asm-s390/thread_info.h +++ b/include/asm-s390/thread_info.h @@ -81,8 +81,6 @@ static inline struct thread_info *current_thread_info(void) #define alloc_thread_info(tsk) ((struct thread_info *) \ __get_free_pages(GFP_KERNEL,THREAD_ORDER)) #define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER) -#define get_thread_info(ti) get_task_struct((ti)->task) -#define put_thread_info(ti) put_task_struct((ti)->task) #endif diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h index 10a619da476..be104f21c70 100644 --- a/include/asm-s390/uaccess.h +++ b/include/asm-s390/uaccess.h @@ -61,8 +61,10 @@ #define segment_eq(a,b) ((a).ar4 == (b).ar4) -#define __access_ok(addr,size) (1) - +static inline int __access_ok(const void *addr, unsigned long size) +{ + return 1; +} #define access_ok(type,addr,size) __access_ok(addr,size) /* @@ -206,25 +208,25 @@ extern int __put_user_bad(void) __attribute__((noreturn)); case 1: { \ unsigned char __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ unsigned short __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ unsigned int __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ unsigned long long __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ default: \ diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index f97d92691f1..2861cdc243a 100644 --- a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h @@ -539,7 +539,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION -# ifdef CONFIG_ARCH_S390_31 +# ifndef CONFIG_64BIT # define __ARCH_WANT_STAT64 # define __ARCH_WANT_SYS_TIME # endif diff --git a/include/asm-s390/vtoc.h b/include/asm-s390/vtoc.h index 41d369f38b0..d1de5b7ebb0 100644 --- a/include/asm-s390/vtoc.h +++ b/include/asm-s390/vtoc.h @@ -176,4 +176,28 @@ struct vtoc_format7_label struct vtoc_cchhb DS7PTRDS; /* pointer to next FMT7 DSCB */ } __attribute__ ((packed)); +struct vtoc_cms_label { + u8 label_id[4]; /* Label identifier */ + u8 vol_id[6]; /* Volid */ + u16 version_id; /* Version identifier */ + u32 block_size; /* Disk block size */ + u32 origin_ptr; /* Disk origin pointer */ + u32 usable_count; /* Number of usable cylinders/blocks */ + u32 formatted_count; /* Maximum number of formatted cylinders/ + * blocks */ + u32 block_count; /* Disk size in CMS blocks */ + u32 used_count; /* Number of CMS blocks in use */ + u32 fst_size; /* File Status Table (FST) size */ + u32 fst_count; /* Number of FSTs per CMS 
block */
+	u8 format_date[6];	/* Disk FORMAT date */
+	u8 reserved1[2];
+	u32 disk_offset;	/* Disk offset when reserved*/
+	u32 map_block;		/* Allocation Map Block with next hole */
+	u32 hblk_disp;		/* Displacement into HBLK data of next hole */
+	u32 user_disp;		/* Displacement into user part of Allocation
+				 * map */
+	u8 reserved2[4];
+	u8 segment_name[8];	/* Name of shared segment */
+} __attribute__ ((packed));
+
 #endif /* _ASM_S390_VTOC_H */
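
A note on the atomic_cmpxchg()/atomic_add_unless() primitives introduced in the atomic.h hunks above: atomic_inc_not_zero() is the usual building block for "take a reference only while the object is still live" patterns. The sketch below is a hypothetical usage example, not part of the patch; the struct and function names are invented for illustration and rely only on the semantics implemented above (the helper returns 0 when the counter was already at the forbidden value).

#include <asm/atomic.h>

/* Hypothetical usage sketch (not from this patch): grab a reference on an
 * object only if its count has not already dropped to zero. */
struct my_obj {
	atomic_t refcount;		/* reaches 0 when the object is being torn down */
	/* ... payload ... */
};

static struct my_obj *my_obj_get(struct my_obj *obj)
{
	/* atomic_inc_not_zero() returns 0 when the count was already 0,
	 * i.e. the object is on its way out and must not be revived. */
	if (!atomic_inc_not_zero(&obj->refcount))
		return NULL;
	return obj;
}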
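
The deleted asm-s390/ioctl.h is replaced by asm-generic/ioctl.h, which keeps the bit layout documented in the removed comment: two direction bits, a 14-bit parameter size, an 8-bit type and an 8-bit number. As an illustration of what the encoding macros produce, here is a hypothetical driver-private ioctl definition; the FOO_* names and struct foo_info are invented for the example.

#include <linux/ioctl.h>
#include <linux/types.h>

/* Hypothetical ioctl numbers, only to show the encoding:
 * dir(2) | size(14) | type(8) | nr(8). */
struct foo_info {
	__u32 flags;
	__u32 value;
};

#define FOO_IOC_MAGIC	'F'
#define FOO_GET_INFO	_IOR(FOO_IOC_MAGIC, 0x01, struct foo_info)
#define FOO_SET_INFO	_IOW(FOO_IOC_MAGIC, 0x02, struct foo_info)

/* Decoding with the helper macros:
 *   _IOC_DIR(FOO_GET_INFO)  == _IOC_READ
 *   _IOC_TYPE(FOO_GET_INFO) == 'F'
 *   _IOC_NR(FOO_GET_INFO)   == 0x01
 *   _IOC_SIZE(FOO_GET_INFO) == sizeof(struct foo_info)
 */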
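
In processor.h, __KSTK_PTREGS() is replaced by task_pt_regs(), which places the saved user registers exactly one struct pt_regs below the top of the task's kernel stack. The stand-alone sketch below models the same pointer arithmetic; THREAD_SIZE and the pt_regs layout here are simplified stand-ins, not the real s390 definitions.

#include <stdio.h>

#define THREAD_SIZE 8192			/* illustrative value only */

struct pt_regs { unsigned long psw[2], gprs[16]; };	/* simplified stand-in */

int main(void)
{
	static unsigned char stack_page[THREAD_SIZE];	/* plays the role of task_stack_page(tsk) */

	/* Same arithmetic as the new task_pt_regs(): go to the upper end of
	 * the stack area, then step back by exactly one struct pt_regs. */
	struct pt_regs *regs = (struct pt_regs *) (stack_page + THREAD_SIZE) - 1;

	printf("stack %p..%p, pt_regs at %p (%zu bytes below the top)\n",
	       (void *) stack_page, (void *) (stack_page + THREAD_SIZE),
	       (void *) regs, sizeof(struct pt_regs));
	return 0;
}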
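
In uaccess.h, the __get_user() assignment changes from a value cast of the fetched word to a re-read of its bytes through a cast pointer. One observable difference, shown by the stand-alone sketch below (illustrative user-space code, not from the patch), is that a value cast converts while the pointer form reinterprets the bits, which matters when the destination is a non-integer type. The kernel is built with -fno-strict-aliasing, so the pointer re-read is well defined there; portable user-space code would use a union or memcpy() instead.

#include <stdio.h>

int main(void)
{
	unsigned int __x = 0x3f800000;	/* the bit pattern of 1.0f */
	float a, b;

	a = (float) __x;		/* value conversion: prints 1065353216.0 */
	b = *(float *) &__x;		/* bit reinterpretation: prints 1.0 */

	printf("%f %f\n", a, b);
	return 0;
}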