author		Tejun Heo <tj@kernel.org>	2014-06-28 08:10:13 -0400
committer	Tejun Heo <tj@kernel.org>	2014-06-28 08:10:13 -0400
commit		eae7975ddf031b3084f4a5f7d88f698aefad96fb
tree		5e5dd384eac28d12ac6ebcf197f92f543bd44c0d /include
parent		d630dc4c9adb41e5bd1e06df2dbeaf622469ddd5
percpu-refcount: add helpers for ->percpu_count accesses
* All four percpu_ref_*() operations implemented in the header file
perform the same operation to determine whether the percpu_ref is
alive and extract the percpu pointer. Factor out the common logic
into __pcpu_ref_alive(). This doesn't change the generated code.
* There are a couple of places in percpu-refcount.c which mask out
PCPU_REF_DEAD to obtain the percpu pointer. Factor that masking out into
pcpu_count_ptr() (sketched below, after the sign-off tags).
* The above changes make the WARN_ON_ONCE() conditional at the top of
percpu_ref_kill_and_confirm() the only user of REF_STATUS(). Test
PCPU_REF_DEAD directly and remove REF_STATUS().
This patch doesn't introduce any functional change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
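
As a reading aid (not part of the patch): the percpu-refcount.c side described in the second and third bullets falls outside the 'include'-limited diffstat below, so here is a minimal, hedged sketch of what those changes plausibly look like. The exact pcpu_count_ptr() definition, the WARN_ON_ONCE() wording, and the wrapper name check_not_already_killed() are assumptions reconstructed from the commit message, not quoted from the patch.

/*
 * Sketch only -- the real code lives in percpu-refcount.c and is not part
 * of the header diff shown on this page.
 */
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/* Second bullet: mask out PCPU_REF_DEAD to recover the percpu pointer. */
#define pcpu_count_ptr(ref)						\
	((unsigned __percpu *)						\
	 ((unsigned long)(ref)->pcpu_count & ~(unsigned long)PCPU_REF_DEAD))

/*
 * Third bullet: with REF_STATUS() gone, the sanity check at the top of
 * percpu_ref_kill_and_confirm() tests the PCPU_REF_DEAD bit directly.
 * check_not_already_killed() is a made-up name to give the test a home.
 */
static void check_not_already_killed(struct percpu_ref *ref)
{
	WARN_ON_ONCE((unsigned long)ref->pcpu_count & PCPU_REF_DEAD);
}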
Diffstat (limited to 'include')
-rw-r--r--	include/linux/percpu-refcount.h	35
1 file changed, 21 insertions, 14 deletions
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index bfdeb0d48e2..b62a4ee6d6a 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -88,10 +88,25 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_PTR		0
 #define PCPU_REF_DEAD		1
 
-#define REF_STATUS(count)	(((unsigned long) count) & PCPU_REF_DEAD)
+/*
+ * Internal helper. Don't use outside percpu-refcount proper. The
+ * function doesn't return the pointer and let the caller test it for NULL
+ * because doing so forces the compiler to generate two conditional
+ * branches as it can't assume that @ref->pcpu_count is not NULL.
+ */
+static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
+				    unsigned __percpu **pcpu_countp)
+{
+	unsigned long pcpu_ptr = (unsigned long)ACCESS_ONCE(ref->pcpu_count);
+
+	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+		return false;
+
+	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	return true;
+}
 
 /**
  * percpu_ref_get - increment a percpu refcount
@@ -105,9 +120,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_inc(*pcpu_count);
 	else
 		atomic_inc(&ref->count);
@@ -131,9 +144,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	} else {
@@ -166,9 +177,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	}
@@ -191,9 +200,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_dec(*pcpu_count);
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
 		ref->release(ref);
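
For contrast with the comment added in the hunk above, here is a hedged illustration (not from the patch; __pcpu_ref_count_or_null() and percpu_ref_get_alt() are invented names) of the rejected alternative in which the helper returns the percpu pointer and the caller tests it for NULL. Because the compiler cannot prove that a live ref->pcpu_count is non-NULL, the dead-bit check and the NULL check become two conditional branches on the hot path.

/* Hypothetical alternative -- NOT what the patch adds. */
static inline unsigned __percpu *
__pcpu_ref_count_or_null(struct percpu_ref *ref)
{
	unsigned long pcpu_ptr = (unsigned long)ACCESS_ONCE(ref->pcpu_count);

	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))		/* branch 1: dead bit */
		return NULL;

	return (unsigned __percpu *)pcpu_ptr;
}

static inline void percpu_ref_get_alt(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	rcu_read_lock_sched();

	pcpu_count = __pcpu_ref_count_or_null(ref);
	if (likely(pcpu_count))				/* branch 2: NULL test */
		this_cpu_inc(*pcpu_count);
	else
		atomic_inc(&ref->count);

	rcu_read_unlock_sched();
}

With the patch's __pcpu_ref_alive() returning a bool and handing the pointer back through an output parameter, only the dead-bit test remains and the percpu pointer is used unconditionally on the fast path.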