author    Heiko Carstens <heiko.carstens@de.ibm.com>      2006-06-29 14:56:13 +0200
committer Martin Schwidefsky <schwidefsky@de.ibm.com>     2006-06-29 14:56:13 +0200
commit    c406abd3a6d0cf5ce8db4db155a729a28fb98c4f (patch)
tree      3b837f54a28a9f9754f8b34fd9cc4847557025e9 /include
parent    0a6047eef1c465c38aacfbdab193161b3f0cd144 (diff)
[S390] cleanup bitops.h.
Encapsulate complete bitops.h with #ifdef __KERNEL__ and remove the now
superfluous ALIGN_CS define and its users. This patch is needed for
compiling klibc.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
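In effect the entire header becomes a single kernel-only region, so a userspace
build such as klibc that includes it no longer sees any kernel-specific
definitions. A minimal sketch of the resulting layout (guard name and ordering
shown for illustration only, not the verbatim header):

    /* include/asm-s390/bitops.h -- structure after the patch (sketch) */
    #ifndef _S390_BITOPS_H
    #define _S390_BITOPS_H

    #ifdef __KERNEL__

    #include <linux/compiler.h>

    /* ... all bitops declarations and inline functions, kernel-only ... */

    #endif /* __KERNEL__ */

    #endif /* _S390_BITOPS_H */

With the whole body guarded this way, the ALIGN_CS machinery that existed only
to support non-__KERNEL__ users becomes dead code and can be deleted, which is
what the remaining hunks below do.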
Diffstat (limited to 'include')
-rw-r--r--  include/asm-s390/bitops.h | 42
1 file changed, 3 insertions(+), 39 deletions(-)
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 4d2b126ba15..0ddcdba79e4 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -12,6 +12,9 @@
* Copyright (C) 1992, Linus Torvalds
*
*/
+
+#ifdef __KERNEL__
+
#include <linux/compiler.h>
/*
@@ -50,19 +53,6 @@
* with operation of the form "set_bit(bitnr, flags)".
*/
-/* set ALIGN_CS to 1 if the SMP safe bit operations should
- * align the address to 4 byte boundary. It seems to work
- * without the alignment.
- */
-#ifdef __KERNEL__
-#define ALIGN_CS 0
-#else
-#define ALIGN_CS 1
-#ifndef CONFIG_SMP
-#error "bitops won't work without CONFIG_SMP"
-#endif
-#endif
-
/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
@@ -121,10 +111,6 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make OR mask */
@@ -141,10 +127,6 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make AND mask */
@@ -161,10 +143,6 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make XOR mask */
@@ -182,10 +160,6 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make OR/test mask */
@@ -205,10 +179,6 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make AND/test mask */
@@ -228,10 +198,6 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make XOR/test mask */
@@ -834,8 +800,6 @@ static inline int sched_find_first_bit(unsigned long *b)
#include <asm-generic/bitops/hweight.h>
-#ifdef __KERNEL__
-
/*
* ATTENTION: intel byte ordering convention for ext2 and minix !!
* bit 0 is the LSB of addr; bit 31 is the MSB of addr;