author     Russell King <rmk+kernel@arm.linux.org.uk>   2010-07-27 10:42:48 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2010-07-27 10:42:52 +0100
commit     4708ac49305bbcd511600d4af71a4c6dca15af65 (patch)
tree       77ba5c83a7d4f4a119e3fd524d82adc297df1e35 /arch/arm/include
parent     1dbd30e9890fd69e50b17edd70ca583546b0fe4e (diff)
parent     2f7989efd4398d92b8adffce2e07dd043a0895fe (diff)
Merge branch 'origin' into misc
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/atomic.h            132
-rw-r--r--  arch/arm/include/asm/mach/udc_pxa2xx.h     4
-rw-r--r--  arch/arm/include/asm/processor.h           4
3 files changed, 72 insertions, 68 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index a0162fa9456..7e79503ab89 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
-		"ldrex	%1, [%2]\n"
+		"ldrex	%1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"strexeq %0, %4, [%2]\n"
-		: "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		: "r" (&ptr->counter), "Ir" (old), "r" (new)
 		: "cc");
 	} while (res);
@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%2]\n"
-"	bic	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
 	: "r" (addr), "Ir" (mask)
 	: "cc");
 }
@@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
 	: "=&r" (result)
-	: "r" (&v->counter)
+	: "r" (&v->counter), "Qo" (v->counter)
 	);
 
 	return result;
@@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	u64 tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
-"1:	ldrexd	%0, %H0, [%1]\n"
-"	strexd	%0, %2, %H2, [%1]\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
+	: "=&r" (tmp), "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
-		"ldrexd	%1, %H1, [%2]\n"
+		"ldrexd	%1, %H1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"teqeq	%H1, %H3\n"
-		"strexdeq %0, %4, %H4, [%2]"
-		: "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"teqeq	%H1, %H4\n"
+		"strexdeq %0, %5, %H5, [%3]"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		: "r" (&ptr->counter), "r" (old), "r" (new)
 		: "cc");
 	} while (res);
@@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	strexd	%1, %3, %H3, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	strexd	%1, %4, %H4, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
@@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%0, %0, #1\n"
 "	sbc	%H0, %H0, #0\n"
 "	teq	%H0, #0\n"
 "	bmi	2f\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter)
 	: "cc");
 
@@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	teq	%0, %4\n"
-"	teqeq	%H0, %H4\n"
+"1:	ldrexd	%0, %H0, [%4]\n"
+"	teq	%0, %5\n"
+"	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %5\n"
-"	adc	%H0, %H0, %H5\n"
-"	strexd	%2, %0, %H0, [%3]\n"
+"	adds	%0, %0, %6\n"
+"	adc	%H0, %H0, %H6\n"
+"	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
 
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
index f3eabf1ecec..833306ee9e7 100644
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ b/arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -21,8 +21,8 @@ struct pxa2xx_udc_mach_info {
 	 * here.  Note that sometimes the signals go through inverters...
 	 */
 	bool	gpio_vbus_inverted;
-	u16	gpio_vbus;			/* high == vbus present */
+	int	gpio_vbus;			/* high == vbus present */
 	bool	gpio_pullup_inverted;
-	u16	gpio_pullup;			/* high == pullup activated */
+	int	gpio_pullup;			/* high == pullup activated */
 };
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 6a89567ffc5..7bed3daf83b 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -91,7 +91,11 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+#if __LINUX_ARM_ARCH__ == 6
+#define cpu_relax()			smp_mb()
+#else
 #define cpu_relax()			barrier()
+#endif
 
 /*
  * Create a new kernel thread
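
The atomic.h change is mechanical throughout: each ldrex/strex sequence gains the counter itself as a memory output operand ("+Qo", or "=Qo"/"Qo" in the pure set/read cases), so the compiler can see that the asm reads and writes v->counter rather than only receiving its address through an "r" input. Because an operand is inserted into the output list, the remaining operands are renumbered (for example %2/%3 become %3/%4 in atomic_add). The sketch below shows the new pattern in isolation; my_atomic_t and my_atomic_add are illustrative stand-ins rather than the kernel's names, and it assumes an ARMv6-or-later target built with GCC.

/*
 * Illustrative sketch only -- my_atomic_t/my_atomic_add are stand-in names,
 * not the kernel's.  Build with GCC for an ARMv6+ target.
 */
typedef struct { int counter; } my_atomic_t;

static inline void my_atomic_add(int i, my_atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ my_atomic_add\n"
"1:	ldrex	%0, [%3]\n"		/* exclusive load of v->counter */
"	add	%0, %0, %4\n"		/* result += i */
"	strex	%1, %0, [%3]\n"		/* try to store; tmp == 0 on success */
"	teq	%1, #0\n"
"	bne	1b"			/* exclusive store failed: retry */
	: "=&r" (result), "=&r" (tmp),
	  "+Qo" (v->counter)		/* %2: tells GCC the asm reads and writes the counter */
	: "r" (&v->counter),		/* %3: address used by ldrex/strex */
	  "Ir" (i)			/* %4: immediate or register */
	: "cc");
}

The other two hunks are independent of this: udc_pxa2xx.h changes the two GPIO numbers from u16 to int, and processor.h makes cpu_relax() expand to smp_mb() rather than a plain barrier() when __LINUX_ARM_ARCH__ == 6.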