From b924a69067b00d3121debae5a738fb0bcbbbb03c Mon Sep 17 00:00:00 2001
From: Chen Gang <gang.chen@asianux.com>
Date: Wed, 25 Sep 2013 12:14:08 +0800
Subject: tile: include: asm: use 'long long' instead of 'u64' for atomic64_t
 and its related functions

The atomic* types hold signed values, and the atomic* functions must
likewise take and return signed values, so use 'long long' instead of
'u64'.

The replacement also fixes a bug in atomic64_add_negative(): a u64 is
never less than 0, so the negativity test could never succeed.

The modifications are:

  In vim, apply the "1,% s/\<u64\>/long long/g" substitution command.
  Remove the now-redundant '__aligned(8)'.
  Re-wrap lines to stay within the 80-column limit (keeping the
  macro-continuation '\' characters aligned) after the replacement.
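
  For example, the '\<' and '\>' atoms match only the whole token
  'u64', so the substitution rewrites each prototype in place:

    -u64 _atomic64_xchg(u64 *v, u64 n);
    +long long _atomic64_xchg(long long *v, long long n);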

Signed-off-by: Chen Gang <gang.chen@asianux.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com> [re-instated const cast]
---
 arch/tile/include/asm/cmpxchg.h | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4b..0ccda3c425b 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n)							\
 	({								\
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 4)				\
 			__cmpxchg_called_with_bad_pointer();		\
 		smp_mb();						\
-		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o,	\
+						(int)n);		\
 	})
 
 #define xchg64(ptr, n)							\
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8)				\
 			__xchg_called_with_bad_pointer();		\
 		smp_mb();						\
-		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n));	\
+		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr),	\
+						(long long)(n));	\
 	})
 
 #define cmpxchg64(ptr, o, n)						\
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8)				\
 			__cmpxchg_called_with_bad_pointer();		\
 		smp_mb();						\
-		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr,	\
+					(long long)o, (long long)n);	\
 	})
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		switch (sizeof(*(ptr))) {				\
 		case 4:							\
 			__x = (typeof(__x))(unsigned long)		\
-				__insn_exch4((ptr), (u32)(unsigned long)(n)); \
+				__insn_exch4((ptr),			\
+					(u32)(unsigned long)(n));	\
 			break;						\
 		case 8:							\
-			__x = (typeof(__x))			\
+			__x = (typeof(__x))				\
 				__insn_exch((ptr), (unsigned long)(n));	\
 			break;						\
 		default:						\
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		switch (sizeof(*(ptr))) {				\
 		case 4:							\
 			__x = (typeof(__x))(unsigned long)		\
-				__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+				__insn_cmpexch4((ptr),			\
+					(u32)(unsigned long)(n));	\
 			break;						\
 		case 8:							\
-			__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+			__x = (typeof(__x))__insn_cmpexch((ptr),	\
+						(long long)(n));	\
 			break;						\
 		default:						\
 			__cmpxchg_called_with_bad_pointer();		\
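
A minimal usage sketch of the reworked cmpxchg64() interface follows;
the counter and helper names are illustrative only, not part of the
patch:

  static long long counter;

  /*
   * Atomically move 'counter' from an expected old value to a new
   * one.  cmpxchg64() returns the value actually found in memory,
   * so the swap took effect iff that equals the expected value.
   */
  static int try_set(long long old, long long new)
  {
  	return cmpxchg64(&counter, old, new) == old;
  }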