Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/local.h      | 13
-rw-r--r--  include/asm-generic/mutex-dec.h  | 30
-rw-r--r--  include/asm-generic/mutex-xchg.h | 33
3 files changed, 43 insertions(+), 33 deletions(-)
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index de4614840c2..9291c24f581 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -7,8 +7,15 @@
#include <asm/atomic.h>
#include <asm/types.h>
-/* An unsigned long type for operations which are atomic for a single
- * CPU. Usually used in combination with per-cpu variables. */
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic_long_t. Which is
+ * rather pointless. The whole point behind local_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local_t allows exploitation of such capabilities.
+ */
/* Implement in terms of atomics. */
@@ -20,7 +27,7 @@ typedef struct
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-#define local_read(l) ((unsigned long)atomic_long_read(&(l)->a))
+#define local_read(l) atomic_long_read(&(l)->a)
#define local_set(l,i) atomic_long_set((&(l)->a),(i))
#define local_inc(l) atomic_long_inc(&(l)->a)
#define local_dec(l) atomic_long_dec(&(l)->a)
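
[Editor's note: a minimal usage sketch, not part of this patch, of the kind of per-CPU counter the new local.h comment describes; the my_counter and count_event names are hypothetical.]

	#include <linux/percpu.h>
	#include <asm/local.h>

	/* hypothetical per-CPU event counter */
	static DEFINE_PER_CPU(local_t, my_counter) = LOCAL_INIT(0);

	static void count_event(void)
	{
		/* local_inc() only needs to be atomic wrt IRQs on this CPU,
		 * so no spinlock or global atomic operation is required */
		local_inc(&get_cpu_var(my_counter));
		put_cpu_var(my_counter);
	}
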
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 40c6d1f8659..29c6ac34e23 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -17,13 +17,14 @@
* it wasn't 1 originally. This function MUST leave the value lower than
* 1 even when the "1" assertion wasn't true.
*/
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- if (unlikely(atomic_dec_return(count) < 0)) \
- fail_fn(count); \
- else \
- smp_mb(); \
-} while (0)
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ fail_fn(count);
+ else
+ smp_mb();
+}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -36,7 +37,7 @@ do { \
* or anything the slow path function returns.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
@@ -59,12 +60,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- smp_mb(); \
- if (unlikely(atomic_inc_return(count) <= 0)) \
- fail_fn(count); \
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ smp_mb();
+ if (unlikely(atomic_inc_return(count) <= 0))
+ fail_fn(count);
+}
#define __mutex_slowpath_needs_to_unlock() 1
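
[Editor's note: a rough sketch, not part of this patch, of how the core mutex code in kernel/mutex.c is expected to call these inline fastpaths; details such as __sched and export annotations are omitted.]

	/* count: 1 = unlocked, 0 = locked, negative = locked, possible waiters */
	void mutex_lock(struct mutex *lock)
	{
		might_sleep();
		/* fastpath 1 -> 0; any other result means contention, fall
		 * back to the slowpath handler in kernel/mutex.c */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}

	void mutex_unlock(struct mutex *lock)
	{
		/* fastpath 0 -> 1; a result <= 0 means waiters may need waking */
		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
	}
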
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 1d24f47e6c4..32a2100c1ae 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -3,7 +3,7 @@
*
* Generic implementation of the mutex fastpath, based on xchg().
*
- * NOTE: An xchg based implementation is less optimal than an atomic
+ * NOTE: An xchg based implementation might be less optimal than an atomic
* decrement/increment based implementation. If your architecture
* has a reasonable atomic dec/inc then you should probably use
* asm-generic/mutex-dec.h instead, or you could open-code an
@@ -22,14 +22,14 @@
* wasn't 1 originally. This function MUST leave the value lower than 1
* even when the "1" assertion wasn't true.
*/
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- if (unlikely(atomic_xchg(count, 0) != 1)) \
- fail_fn(count); \
- else \
- smp_mb(); \
-} while (0)
-
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_xchg(count, 0) != 1))
+ fail_fn(count);
+ else
+ smp_mb();
+}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -42,7 +42,7 @@ do { \
* or anything the slow path function returns
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
return fail_fn(count);
@@ -64,12 +64,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- smp_mb(); \
- if (unlikely(atomic_xchg(count, 1) != 0)) \
- fail_fn(count); \
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ smp_mb();
+ if (unlikely(atomic_xchg(count, 1) != 0))
+ fail_fn(count);
+}
#define __mutex_slowpath_needs_to_unlock() 0
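
[Editor's note: as the NOTE in mutex-xchg.h suggests, an architecture selects one of the two generic variants (or open-codes its own fastpath) from its asm/mutex.h; a minimal sketch of such a wrapper, with the dec-based variant chosen here purely as an example.]

	/* include/asm-<arch>/mutex.h (sketch) */
	#ifndef _ASM_MUTEX_H
	#define _ASM_MUTEX_H

	/* This architecture has a cheap atomic dec/inc, so use the
	 * decrement-based fastpath; an architecture with only xchg()
	 * would include asm-generic/mutex-xchg.h instead. */
	#include <asm-generic/mutex-dec.h>

	#endif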