From 3363fbdd6fb4992ffe6c17c0dd7388ffa22d99e6 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Thu, 27 Apr 2006 18:40:12 -0700
Subject: [PATCH] s390: futex atomic operations

Add support for atomic futex operations.

Signed-off-by: Martin Schwidefsky
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-s390/futex.h | 123 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 119 insertions(+), 4 deletions(-)

(limited to 'include/asm-s390')

diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index 6a332a9f099..40c25e166a9 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -1,6 +1,121 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_S390_FUTEX_H
+#define _ASM_S390_FUTEX_H
 
-#include <asm-generic/futex.h>
+#ifdef __KERNEL__
 
-#endif
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+#ifndef __s390x__
+#define __futex_atomic_fixup \
+	".section __ex_table,\"a\"\n" \
+	"   .align 4\n" \
+	"   .long  0b,2b,1b,2b\n" \
+	".previous"
+#else /* __s390x__ */
+#define __futex_atomic_fixup \
+	".section __ex_table,\"a\"\n" \
+	"   .align 8\n" \
+	"   .quad  0b,2b,1b,2b\n" \
+	".previous"
+#endif /* __s390x__ */
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile("   l   %1,0(%6)\n"				\
+		     "0: " insn						\
+		     "   cs  %1,%2,0(%6)\n"				\
+		     "1: jl  0b\n"					\
+		     "   lhi %0,0\n"					\
+		     "2:\n"						\
+		     __futex_atomic_fixup				\
+		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
+		       "=m" (*uaddr)					\
+		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		       "m" (*uaddr) : "cc" );
+
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, newval, ret;
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	dec_preempt_count();
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+	int ret;
+
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+	asm volatile("   cs   %1,%4,0(%5)\n"
+		     "0: lr   %0,%1\n"
+		     "1:\n"
+#ifndef __s390x__
+		     ".section __ex_table,\"a\"\n"
+		     "   .align 4\n"
+		     "   .long  0b,1b\n"
+		     ".previous"
+#else /* __s390x__ */
+		     ".section __ex_table,\"a\"\n"
+		     "   .align 8\n"
+		     "   .quad  0b,1b\n"
+		     ".previous"
+#endif /* __s390x__ */
+		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		     : "cc", "memory" );
+	return oldval;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_S390_FUTEX_H */
--
cgit v1.2.3-70-g09d2
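
For reference, the encoded_op argument that futex_atomic_op_inuser() decodes is
packed by the generic futex code on the FUTEX_WAKE_OP path with the FUTEX_OP()
helper from <linux/futex.h>: bits 28-31 hold the operation, 24-27 the
comparison, 12-23 the sign-extended operand and 0-11 the sign-extended compare
value.  The following is a minimal user-space sketch of that packing and of the
same unpacking the s390 routine performs; the constants mirror <linux/futex.h>
and the example values are made up:

	#include <stdio.h>

	#define FUTEX_OP_SET	0	/* *(int *)uaddr2  = oparg */
	#define FUTEX_OP_ADD	1	/* *(int *)uaddr2 += oparg */
	#define FUTEX_OP_CMP_EQ	0	/* wake if oldval == cmparg */

	/* Same bit layout as the kernel's FUTEX_OP() helper. */
	#define FUTEX_OP(op, oparg, cmp, cmparg) \
		((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24) | \
		 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

	int main(void)
	{
		int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0);

		/* The same extraction futex_atomic_op_inuser() performs. */
		int op     = (encoded_op >> 28) & 7;
		int cmp    = (encoded_op >> 24) & 15;
		int oparg  = (encoded_op << 8) >> 20;	/* sign-extended 12-bit operand */
		int cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12-bit compare value */

		printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
		return 0;
	}

This prints "op=1 cmp=0 oparg=1 cmparg=0", i.e. "atomically add 1 to the futex
word and report whether its old value was 0", which is what the inline assembly
above implements with a load/compare-and-swap retry loop.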
From 58268b97f679108d32a882a7fc029585da801975 Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Thu, 27 Apr 2006 18:40:24 -0700
Subject: [PATCH] s390: add read_mostly optimization

Add a read_mostly data section and define __read_mostly so that mostly-read
variables are grouped there, preventing cache line pollution from writes to
unrelated data.  In addition, fix the incorrect alignment of the
cacheline_aligned data section: s390 has a cache line size of 256 bytes.

Signed-off-by: Christian Borntraeger
Signed-off-by: Martin Schwidefsky
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/s390/kernel/vmlinux.lds.S | 4 +++-
 include/asm-s390/cache.h       | 2 ++
 2 files changed, 5 insertions(+), 1 deletion(-)

(limited to 'include/asm-s390')

diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 9289face302..9f34bb54c05 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -58,9 +58,11 @@ SECTIONS
 	. = ALIGN(4096);
 	.data.page_aligned : { *(.data.idt) }
 
-	. = ALIGN(32);
+	. = ALIGN(256);
 	.data.cacheline_aligned : { *(.data.cacheline_aligned) }
 
+	. = ALIGN(256);
+	.data.read_mostly : { *(.data.read_mostly) }
 	_edata = .;		/* End of data section */
 
 	. = ALIGN(8192);	/* init_task */

diff --git a/include/asm-s390/cache.h b/include/asm-s390/cache.h
index e20cdd9074d..cdf431b061b 100644
--- a/include/asm-s390/cache.h
+++ b/include/asm-s390/cache.h
@@ -16,4 +16,6 @@
 
 #define ARCH_KMALLOC_MINALIGN	8
 
+#define __read_mostly	__attribute__((__section__(".data.read_mostly")))
+
 #endif
--
cgit v1.2.3-70-g09d2
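
With the second patch applied, moving a variable into the new section only
requires the annotation.  A short hypothetical sketch (the variable and its
meaning are invented for illustration):

	#include <asm/cache.h>	/* defines __read_mostly on s390 after this patch */

	/*
	 * A value written once at boot and read in hot paths.  The attribute
	 * places it in .data.read_mostly, which the linker script change above
	 * collects into its own 256-byte-aligned block, so stores to frequently
	 * written data elsewhere cannot bounce its cache line.
	 */
	static unsigned int scan_interval __read_mostly = 100;

The 256-byte alignment matters more on s390 than on most architectures: a
single cache line covers a lot of unrelated data, so false sharing is more
likely and grouping mostly-read variables is a correspondingly larger win.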