| author | Paul Mundt <lethal@linux-sh.org> | 2008-07-29 08:09:44 +0900 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2008-07-29 08:09:44 +0900 |
| commit | f15cbe6f1a4b4d9df59142fc8e4abb973302cf44 (patch) | |
| tree | 774d7b11abaaf33561ab8268bf51ddd9ceb79025 /arch/sh/include/asm/atomic-llsc.h | |
| parent | 25326277d8d1393d1c66240e6255aca780f9e3eb (diff) | |
sh: migrate to arch/sh/include/
This follows the sparc changes in commit a439fe51a1f8eb087c22dd24d69cebae4a3addac.
Most of the moving about was done with Sam's directions at:
http://marc.info/?l=linux-sh&m=121724823706062&w=2
with subsequent hacking and fixups entirely my fault.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/include/asm/atomic-llsc.h')
-rw-r--r-- | arch/sh/include/asm/atomic-llsc.h | 107 |
1 file changed, 107 insertions, 0 deletions
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
new file mode 100644
index 00000000000..4b00b78e3f4
--- /dev/null
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_add    \n"
+"       add     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_sub    \n"
+"       sub     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long temp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_add_return     \n"
+"       add     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+"       synco                                   \n"
+        : "=&z" (temp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+
+        return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long temp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_sub_return     \n"
+"       sub     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+"       synco                                   \n"
+        : "=&z" (temp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+
+        return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_clear_mask     \n"
+"       and     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (~mask), "r" (&v->counter)
+        : "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_set_mask       \n"
+"       or      %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (mask), "r" (&v->counter)
+        : "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
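For readers new to SuperH, movli.l/movco.l behave as a load-locked/store-conditional pair: movco.l only commits if the reservation taken by movli.l is still intact, and `bf 1b` branches back to retry otherwise. Below is a minimal userspace C sketch of the same retry-until-commit structure, using GCC's __atomic compare-and-exchange builtins as a stand-in; the my_atomic_t type and my_atomic_add() name are hypothetical illustrations only and are not part of this patch or of the kernel API.

```c
#include <stdio.h>

/* Illustrative stand-in for the kernel's atomic_t, which wraps an int. */
typedef struct { int counter; } my_atomic_t;

/*
 * Retry loop analogous to the movli.l/movco.l sequence above: compute the
 * new value from a snapshot of the counter and only commit it if no other
 * thread changed the counter in the meantime; otherwise try again.
 */
static inline void my_atomic_add(int i, my_atomic_t *v)
{
        int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
        int new;

        do {
                new = old + i;
                /* Weak CAS may fail spuriously; on failure 'old' is
                 * refreshed with the current value and we loop. */
        } while (!__atomic_compare_exchange_n(&v->counter, &old, new, 1,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));
}

int main(void)
{
        my_atomic_t v = { 0 };

        my_atomic_add(5, &v);
        my_atomic_add(-2, &v);
        printf("counter = %d\n", v.counter);    /* prints 3 */
        return 0;
}
```

The relaxed memory orders above are intentionally minimal and only model the retry structure; the synco issued by the *_return variants in the patch is a separate ordering concern on SH-4A and has no counterpart in this sketch.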