author    Waiman Long <Waiman.Long@hp.com>                 2013-08-28 18:13:26 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>   2013-08-28 18:13:27 -0700
commit    0f8f2aaaab0b0f9c13635cb02e7d19bdaa9aa1bb (patch)
tree      da409bc0a2149914582c9f8d834ebc27a2ae9cde /include/linux/lockref.h
parent    f0cc6ffb8ce8961db587e5072168cac0cbc25f05 (diff)
Add new lockref infrastructure reference implementation
This introduces a new "lockref" structure that supports the concept of
lockless updates of reference counts that still honor an attached
spinlock.

NOTE! This reference implementation is not the optimized lockless
version; rather, it is the fallback implementation using standard
spinlocks. The actual optimized versions will be merged into 3.12, but
I wanted to get the infrastructure in place and document the new
interfaces.

[ Also note that this particular commit is a drastically cut-down,
  minimal version of the original patch by Waiman. In order to properly
  credit the original author I'm marking Waiman as the author here, but
  in the end this patch bears little resemblance to the patch by Waiman.
  So blame any errors on me editing things down to the point where I can
  introduce the infrastructure before the merge window for 3.12 actually
  opens.  - Linus ]

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
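To make the intended use of the new interfaces a bit more concrete, here is a
minimal sketch of a refcounted object built on top of them. The names
struct my_obj, my_obj_tryget() and my_obj_put() are hypothetical and invented
for this illustration; only the lockref_* calls come from the interface added
by this patch.

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/*
 * Sketch only: "struct my_obj" and its helpers are made-up names.
 * ref.lock doubles as the lock protecting the rest of the object.
 */
struct my_obj {
	struct lockref ref;
	/* ... payload fields ... */
};

/* Take a new reference only if the object is still live (count > 0). */
static bool my_obj_tryget(struct my_obj *obj)
{
	return lockref_get_not_zero(&obj->ref);
}

/* Drop a reference; tear the object down when the last one goes away. */
static void my_obj_put(struct my_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;		/* count was > 1 and has been decremented */

	/*
	 * lockref_put_or_lock() returned 0: the count was <= 1 and
	 * ref.lock is now held, so nobody else can take a new reference
	 * while we free the object.
	 */
	obj->ref.count = 0;
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}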
Diffstat (limited to 'include/linux/lockref.h')
-rw-r--r--  include/linux/lockref.h | 71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..01233e01627a
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+ spinlock_t lock;
+ unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ lockref->count++;
+ spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count is 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+ int retval = 0;
+
+ spin_lock(&lockref->lock);
+ if (lockref->count) {
+ lockref->count++;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ if (lockref->count <= 1)
+ return 0;
+ lockref->count--;
+ spin_unlock(&lockref->lock);
+ return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
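The header comment in the new file describes implementations that avoid taking
the spinlock on the common increment/decrement by using a cmpxchg that covers
both the lock and the count word. The fragment below is only a loose userspace
illustration of that idea, not the optimized code the commit message says will
be merged for 3.12: lockless_get(), LOCK_BIT, COUNT_MASK and the packing
scheme are assumptions made for this sketch.

/*
 * Illustration only: a toy stand-in for the lockless fast path.
 * The count lives in the low 32 bits and a pretend "spinlock held"
 * flag lives in bit 32 of a single 64-bit word.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCK_BIT	(1ULL << 32)	/* assumed "lock held" flag */
#define COUNT_MASK	0xffffffffULL	/* low 32 bits hold the count */

/*
 * Try to bump the count without taking the lock.  Returns false if the
 * lock bit was set or the CAS kept failing; a real implementation would
 * then fall back to the locked slow path.
 */
static bool lockless_get(_Atomic uint64_t *word)
{
	uint64_t old = atomic_load(word);

	for (int retries = 0; retries < 100; retries++) {
		if (old & LOCK_BIT)
			return false;	/* lock held: use the slow path */

		uint64_t new = (old & ~COUNT_MASK) |
			       (((old & COUNT_MASK) + 1) & COUNT_MASK);

		/*
		 * The CAS succeeds only if both the lock bit and the
		 * count are still exactly what we saw, i.e. the update
		 * is done semantically as if the spinlock had been taken.
		 */
		if (atomic_compare_exchange_weak(word, &old, new))
			return true;
		/* "old" was reloaded by the failed CAS; try again. */
	}
	return false;
}

The fallback added by this patch simply takes the spinlock around the count
update; the point of keeping the spinlock and the count adjacent in struct
lockref is that, on architectures where the lock fits in the same word as the
count, an optimized version can update both with one wide cmpxchg along the
lines sketched above.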