-rw-r--r--  include/asm-i386/mutex.h  |  16
-rw-r--r--  kernel/mutex-debug.c      |   2
-rw-r--r--  kernel/mutex.c            |  11
3 files changed, 16 insertions, 13 deletions
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 4e5e3de1b9a..c657d4b09f0 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -28,7 +28,13 @@ do {                                                        \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK    "   decl (%%eax)        \n"                     \
-                       "   js "#fail_fn"       \n"                     \
+                       "   js 2f               \n"                     \
+                       "1:                     \n"                     \
+                                                                       \
+               LOCK_SECTION_START("")                                  \
+                       "2: call "#fail_fn"     \n"                     \
+                       "   jmp 1b              \n"                     \
+               LOCK_SECTION_END                                        \
                                                                        \
                :"=a" (dummy)                                           \
                : "a" (count)                                           \
@@ -78,7 +84,13 @@ do {                                                        \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK    "   incl (%%eax)        \n"                     \
-                       "   jle "#fail_fn"      \n"                     \
+                       "   jle 2f              \n"                     \
+                       "1:                     \n"                     \
+                                                                       \
+               LOCK_SECTION_START("")                                  \
+                       "2: call "#fail_fn"     \n"                     \
+                       "   jmp 1b              \n"                     \
+               LOCK_SECTION_END                                        \
                                                                        \
                :"=a" (dummy)                                           \
                : "a" (count)                                           \
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 4fcb051a8b9..3dec7536378 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -20,8 +20,6 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 
-#include <asm/mutex.h>
-
 #include "mutex-debug.h"
 
 /*
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7eb96066144..5c256184419 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -81,15 +81,10 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  */
 void fastcall __sched mutex_lock(struct mutex *lock)
 {
+       might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
-        *
-        * NOTE: if asm/mutex.h is included, then some architectures
-        * rely on mutex_lock() having _no other code_ here but this
-        * fastpath. That allows the assembly fastpath to do
-        * tail-merging optimizations. (If you want to put testcode
-        * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 }
@@ -115,8 +110,6 @@ void fastcall __sched mutex_unlock(struct mutex *lock)
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
-        *
-        * NOTE: no other code must be here - see mutex_lock() .
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
@@ -261,7 +254,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
  */
 int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 {
-       /* NOTE: no other code must be here - see mutex_lock() */
+       might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
 }
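
For context: the assembly change keeps the contended case out of the hot path. The fastpath stays a single locked decl/incl, and the "js 2f"/"jle 2f" branch now lands in an out-of-line section (LOCK_SECTION_START) that calls the slow path and jumps back, rather than tail-jumping straight into fail_fn. That matters because mutex_lock() and mutex_lock_interruptible() now run might_sleep() before the fastpath, so the removed NOTE's assumption that the fastpath is the only code in the function no longer holds. The user-space C sketch below only illustrates that decrement-based fastpath/slowpath split; it is not kernel code, and the demo_* names are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

struct demo_mutex {
        atomic_int count;       /* 1: unlocked, 0: locked, <0: contended */
};

/* Placeholder for the real blocking slow path. */
static void demo_lock_slowpath(struct demo_mutex *lock)
{
        printf("contended: a real implementation would sleep here\n");
}

static void demo_lock(struct demo_mutex *lock)
{
        /*
         * Fastpath: one atomic decrement; a negative result means the
         * lock was already held, so take the (unlikely) slow path.
         */
        if (__builtin_expect(atomic_fetch_sub(&lock->count, 1) - 1 < 0, 0))
                demo_lock_slowpath(lock);
}

int main(void)
{
        struct demo_mutex m = { .count = 1 };

        demo_lock(&m);          /* uncontended: fastpath only */
        demo_lock(&m);          /* already held: takes the slow path */
        return 0;
}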