path: root/include/asm-x86_64/system.h
author	Jeff Dike <jdike@addtoit.com>	2007-05-08 00:35:02 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 11:15:20 -0700
commit	a436ed9c5106b41606cbb55ab3b28389fe8ae04f (patch)
tree	b8df0bde6d7eb2808c37da815d8857396ee1eaf1 /include/asm-x86_64/system.h
parent	5dc12ddee93d63d7107cbbf70db23476d7b30e43 (diff)
x86: create asm/cmpxchg.h
i386: Rearrange the cmpxchg code to allow atomic.h to get it without needing to include system.h. This kills warnings in the UML build from atomic.h about implicit declarations of cmpxchg symbols. The i386 build presumably isn't seeing this because a separate inclusion of system.h is covering it over. The cmpxchg stuff is moved to asm-i386/cmpxchg.h, with an include left in system.h for the benefit of generic code which expects cmpxchg there. Meanwhile, atomic.h includes cmpxchg.h. This causes no noticeable damage to the i386 build.

x86_64: Move cmpxchg into its own header. atomic.h already included system.h, so this is changed to include cmpxchg.h. This is purely cleanup - it's not fixing any warnings - so if the x86_64 system.h isn't considered as cleanup-worthy as i386, then this can be dropped. It causes no noticeable damage to the x86_64 build.

uml: The i386 and x86_64 cmpxchg patches require an asm-um/cmpxchg.h for the UML build.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
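The UML wrapper mentioned above only needs to forward to the underlying architecture's implementation. A minimal sketch of what asm-um/cmpxchg.h can look like (the "asm/arch" include path is an assumption following UML's convention for reaching the host-architecture headers; it is not shown in this diff):

#ifndef __UM_CMPXCHG_H
#define __UM_CMPXCHG_H

/* Forward to the host architecture's cmpxchg implementation; the
 * "asm/arch" path is assumed here, following UML's usual convention. */
#include "asm/arch/cmpxchg.h"

#endif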
Diffstat (limited to 'include/asm-x86_64/system.h')
-rw-r--r--	include/asm-x86_64/system.h	130
1 file changed, 1 insertion(+), 129 deletions(-)
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 5316f3cac23..b7b8021e8c4 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include <asm/segment.h>
-#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
#ifdef __KERNEL__
@@ -124,134 +124,6 @@ static inline void sched_cacheflush(void)
#define nop() __asm__ __volatile__ ("nop")
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define __xg(x) ((volatile long *)(x))
-
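-/*
- * On x86_64 an aligned 64-bit store is a single atomic instruction,
- * so a plain assignment suffices here (unlike i386, which needs a
- * cmpxchg8b sequence for the same operation).
- */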
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
- *ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies a lock anyway.
- * Note 2: xchg has a side effect, so the volatile attribute is necessary;
- * strictly speaking the asm constraints are incomplete, since *ptr is
- * also an output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
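For context, xchg() hands back the previous contents of *ptr, which is already enough to build a test-and-set lock. A hedged usage sketch, with hypothetical names (dummy_lock, dummy_trylock) that do not appear in this file:

static unsigned long dummy_lock;	/* hypothetical flag word */

static inline int dummy_trylock(void)
{
	/* Atomically store 1 and fetch the old value; seeing 0
	 * means this CPU took the lock. */
	return xchg(&dummy_lock, 1UL) == 0;
}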
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
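The comment above gives the calling convention; the canonical pattern built on top of it is a read-modify-retry loop. A sketch with hypothetical names (dummy_counter, dummy_add), using the cmpxchg() macro defined further down in this file:

static unsigned long dummy_counter;	/* hypothetical counter */

static inline void dummy_add(unsigned long delta)
{
	unsigned long old;

	do {
		old = dummy_counter;
		/* cmpxchg returns the value it found in memory; if that
		 * is not 'old', another CPU won the race and we retry. */
	} while (cmpxchg(&dummy_counter, old, old + delta) != old);
}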
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old, unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__("cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__("cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
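Unlike cmpxchg(), the _local variant above omits LOCK_PREFIX, so it is atomic only against the current CPU (e.g. interrupts), not against other processors; it is meant for per-CPU data where the cheaper unlocked form is safe. A sketch of that intended use, with hypothetical names (dummy_stat, dummy_stat_inc):

static DEFINE_PER_CPU(unsigned long, dummy_stat);	/* hypothetical */

static inline void dummy_stat_inc(void)
{
	unsigned long old, *p = &__get_cpu_var(dummy_stat);

	/* Only this CPU updates its own slot, so the unlocked
	 * cmpxchg_local() is sufficient and avoids a bus-locked cycle. */
	do {
		old = *p;
	} while (cmpxchg_local(p, old, old + 1) != old);
}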
-
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()