author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 15:35:32 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 15:35:32 -0700
commit | 0798b1dbfbd9ff2a370c5968c5f0621ef0075fe0 (patch)
tree | c7f61ab9683786a070da0933b9981fc74a4d865f /arch/tile/include/asm/atomic_64.h
parent | ad363e0916423b2e6cdfcdc30ae707ec709f0a65 (diff)
parent | 6738d3210aabe3016a1b03cd98a7fc479c229197 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (26 commits)
arch/tile: prefer "tilepro" as the name of the 32-bit architecture
compat: include aio_abi.h for aio_context_t
arch/tile: cleanups for tilegx compat mode
arch/tile: allocate PCI IRQs later in boot
arch/tile: support signal "exception-trace" hook
arch/tile: use better definitions of xchg() and cmpxchg()
include/linux/compat.h: coding-style fixes
tile: add an RTC driver for the Tilera hypervisor
arch/tile: finish enabling support for TILE-Gx 64-bit chip
compat: fixes to allow working with tile arch
arch/tile: update defconfig file to something more useful
tile: do_hardwall_trap: do not play with task->sighand
tile: replace mm->cpu_vm_mask with mm_cpumask()
tile,mn10300: add device parameter to dma_cache_sync()
audit: support the "standard" <asm-generic/unistd.h>
arch/tile: clarify flush_buffer()/finv_buffer() function names
arch/tile: kernel-related cleanups from removing static page size
arch/tile: various header improvements for building drivers
arch/tile: disable GX prefetcher during cache flush
arch/tile: tolerate disabling CONFIG_BLK_DEV_INITRD
...
Diffstat (limited to 'arch/tile/include/asm/atomic_64.h')
-rw-r--r-- | arch/tile/include/asm/atomic_64.h | 156
1 file changed, 156 insertions, 0 deletions
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
new file mode 100644
index 00000000000..1c1e60d8ccb
--- /dev/null
+++ b/arch/tile/include/asm/atomic_64.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Do not include directly; use <asm/atomic.h>.
+ */
+
+#ifndef _ASM_TILE_ATOMIC_64_H
+#define _ASM_TILE_ATOMIC_64_H
+
+#ifndef __ASSEMBLY__
+
+#include <arch/spr_def.h>
+
+/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
+
+#define atomic_set(v, i) ((v)->counter = (i))
+
+/*
+ * The smp_mb() operations throughout are to support the fact that
+ * Linux requires memory barriers before and after the operation,
+ * on any routine which updates memory and returns a value.
+ */
+
+static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
+{
+	int val;
+	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
+	smp_mb(); /* barrier for proper semantics */
+	val = __insn_cmpexch4((void *)&v->counter, n);
+	smp_mb(); /* barrier for proper semantics */
+	return val;
+}
+
+static inline int atomic_xchg(atomic_t *v, int n)
+{
+	int val;
+	smp_mb(); /* barrier for proper semantics */
+	val = __insn_exch4((void *)&v->counter, n);
+	smp_mb(); /* barrier for proper semantics */
+	return val;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__insn_fetchadd4((void *)&v->counter, i);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int val;
+	smp_mb(); /* barrier for proper semantics */
+	val = __insn_fetchadd4((void *)&v->counter, i) + i;
+	barrier(); /* the "+ i" above will wait on memory */
+	return val;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = atomic_cmpxchg(v, guess, guess + a);
+	} while (guess != oldval);
+	return oldval != u;
+}
+
+/* Now the true 64-bit operations. */
+
+#define ATOMIC64_INIT(i) { (i) }
+
+#define atomic64_read(v) ((v)->counter)
+#define atomic64_set(v, i) ((v)->counter = (i))
+
+static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
+{
+	long val;
+	smp_mb(); /* barrier for proper semantics */
+	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
+	val = __insn_cmpexch((void *)&v->counter, n);
+	smp_mb(); /* barrier for proper semantics */
+	return val;
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long n)
+{
+	long val;
+	smp_mb(); /* barrier for proper semantics */
+	val = __insn_exch((void *)&v->counter, n);
+	smp_mb(); /* barrier for proper semantics */
+	return val;
+}
+
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+	__insn_fetchadd((void *)&v->counter, i);
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+	long val;
+	smp_mb(); /* barrier for proper semantics */
+	val = __insn_fetchadd((void *)&v->counter, i) + i;
+	barrier(); /* the "+ i" above will wait on memory */
+	return val;
+}
+
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = atomic64_cmpxchg(v, guess, guess + a);
+	} while (guess != oldval);
+	return oldval != u;
+}
+
+#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
+#define atomic64_sub(i, v) atomic64_add(-(i), (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_inc(v) atomic64_add(1, (v))
+#define atomic64_dec(v) atomic64_sub(1, (v))
+
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+/* Atomic dec and inc don't implement barrier, so provide them if needed. */
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+/* Define this to indicate that cmpxchg is an efficient operation. */
+#define __HAVE_ARCH_CMPXCHG
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_TILE_ATOMIC_64_H */
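
The guess-and-retry loop in atomic_add_unless()/atomic64_add_unless() above is the standard way to build a conditional update out of a value-returning cmpxchg. The sketch below is illustrative only and not part of the commit: it restates the same loop in user-space C, with the GCC/Clang __atomic builtins standing in for the tile-specific __insn_cmpexch4 instruction and SPR_CMPEXCH_VALUE register, and the helper names cmpxchg_int() and add_unless() invented for this example.

#include <stdio.h>

/* Compare-and-exchange that returns the value previously in *p,
 * mirroring the kernel's atomic_cmpxchg() return convention. */
static int cmpxchg_int(int *p, int old, int newval)
{
	int expected = old;

	/* On failure the builtin writes the current value of *p back
	 * into "expected", so it holds the prior value either way. */
	__atomic_compare_exchange_n(p, &expected, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;
}

/* Add "a" to *v unless *v == u; return non-zero if the add happened. */
static int add_unless(int *v, int a, int u)
{
	int guess, oldval = *v;

	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg_int(v, guess, guess + a);
	} while (guess != oldval);

	return oldval != u;
}

int main(void)
{
	int counter = 5;
	int added;

	added = add_unless(&counter, 1, 0);	/* succeeds: counter becomes 6 */
	printf("added=%d counter=%d\n", added, counter);

	added = add_unless(&counter, 1, 6);	/* refused: counter equals u */
	printf("added=%d counter=%d\n", added, counter);
	return 0;
}

The kernel version gets the same effect from a single __insn_cmpexch4, with SPR_CMPEXCH_VALUE supplying the expected value, and relies on the explicit smp_mb() calls shown in the header for ordering rather than on per-call memory-order arguments.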