Diffstat (limited to 'include/linux/kref.h')
-rw-r--r-- include/linux/kref.h | 42 +++++++++++++++++++++++++++++++++++++++---
1 file changed, 39 insertions(+), 3 deletions(-)
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 4972e6e9ca9..484604d184b 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
@@ -39,8 +40,11 @@ static inline void kref_init(struct kref *kref)
*/
static inline void kref_get(struct kref *kref)
{
- WARN_ON(!atomic_read(&kref->refcount));
- atomic_inc(&kref->refcount);
+ /* If refcount was 0 before incrementing, then we have a race
+ * condition: this kref is being freed by some other thread right now.
+ * In that case one should use kref_get_unless_zero() instead.
+ */
+ WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
/**
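
For context, the lookup pattern the new comment points at looks like the sketch below. This is not part of this patch: struct my_obj, my_obj_lookup() and the list layout are hypothetical illustrations. On a lookup path that can race with the final kref_put() on the same object, a plain kref_get() could bump a refcount that has already hit zero (tripping the WARN_ON_ONCE above); kref_get_unless_zero() fails instead, so the lookup simply misses the dying object.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref kref;
	struct list_head node;
	int id;
};

/* Find an object by id and take a reference, or return NULL if no
 * live object matches. The list lock only guarantees the object is
 * still linked; kref_get_unless_zero() guarantees it is still alive.
 */
static struct my_obj *my_obj_lookup(struct list_head *head, int id,
				    spinlock_t *list_lock)
{
	struct my_obj *obj;

	spin_lock(list_lock);
	list_for_each_entry(obj, head, node) {
		if (obj->id == id && kref_get_unless_zero(&obj->kref)) {
			spin_unlock(list_lock);
			return obj;	/* caller now holds a reference */
		}
	}
	spin_unlock(list_lock);
	return NULL;
}
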
@@ -95,12 +99,44 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return kref_sub(kref, 1, release);
}
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identically to kref_put() with one exception: if the reference
+ * count drops to zero, the lock will be taken atomically with respect to
+ * dropping the reference count. The release function has to call
+ * spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+ void (*release)(struct kref *kref),
+ spinlock_t *lock)
+{
+ unsigned long flags;
+
+ WARN_ON(release == NULL);
+ if (atomic_add_unless(&kref->refcount, -1, 1))
+ return 0;
+ spin_lock_irqsave(lock, flags);
+ if (atomic_dec_and_test(&kref->refcount)) {
+ release(kref);
+ local_irq_restore(flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(lock, flags);
+ return 0;
+}
+
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
WARN_ON(release == NULL);
- if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
+ if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
mutex_lock(lock);
if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
mutex_unlock(lock);
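
To make the unlock contract of kref_put_spinlock_irqsave() concrete, here is a minimal usage sketch. It is not part of this patch: struct my_cmd, my_cmd_release() and cmd_list_lock are hypothetical. The release callback runs with the lock held and interrupts disabled, so it drops the lock with a bare spin_unlock(); kref_put_spinlock_irqsave() then restores the saved IRQ flags itself via local_irq_restore().

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_cmd {
	struct kref kref;
	struct list_head node;	/* linked on cmd_list under cmd_list_lock */
};

static LIST_HEAD(cmd_list);
static DEFINE_SPINLOCK(cmd_list_lock);

/* Called by kref_put_spinlock_irqsave() with cmd_list_lock held and
 * IRQs disabled, once the refcount has dropped to zero.
 */
static void my_cmd_release(struct kref *kref)
{
	struct my_cmd *cmd = container_of(kref, struct my_cmd, kref);

	list_del(&cmd->node);
	spin_unlock(&cmd_list_lock);	/* bare unlock, no _irqrestore */
	kfree(cmd);
}

static void my_cmd_put(struct my_cmd *cmd)
{
	kref_put_spinlock_irqsave(&cmd->kref, my_cmd_release,
				  &cmd_list_lock);
}
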