commit    4533d86270d7986e00594495dde9a109d6be27ae
tree      c2473cac653f7b98e5bd5e6475e63734be4b7644 /lib/spinlock_debug.c
parent    21c5e50e15b1abd797e62f18fd7f90b9cc004cbd
parent    5bc66170dc486556a1e36fd384463536573f4b82
author    H. Peter Anvin <hpa@linux.intel.com>  2012-10-19 07:54:24 -0700
committer H. Peter Anvin <hpa@linux.intel.com>  2012-10-19 07:55:09 -0700
Merge commit '5bc66170dc486556a1e36fd384463536573f4b82' into x86/urgent
From Borislav Petkov <bp@amd64.org>:
Below is a RAS fix which reverts the addition of a sysfs attribute
that we agreed, post factum, is not needed. It should go in now
because otherwise that sysfs attribute will end up in 3.7 and thus be
exposed to userspace; removing it after that would be a lot harder.
This is done as a merge rather than a simple patch/cherry-pick since
the baseline for this patch was not in the previous x86/urgent.
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'lib/spinlock_debug.c')
 lib/spinlock_debug.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae05..0374a596cff 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 {
 	u64 i;
 	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
 
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_spin_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			spin_dump(lock, "lockup suspected");
+	for (i = 0; i < loops; i++) {
+		if (arch_spin_trylock(&lock->raw_lock))
+			return;
+		__delay(1);
+	}
+	/* lockup suspected: */
+	spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
-			trigger_all_cpu_backtrace();
+	trigger_all_cpu_backtrace();
 #endif
-		}
-	}
+
+	/*
+	 * The trylock above was causing a livelock. Give the lower level arch
+	 * specific lock code a chance to acquire the lock. We have already
+	 * printed a warning/backtrace at this point. The non-debug arch
+	 * specific code might actually succeed in acquiring the lock. If it is
+	 * not successful, the end-result is the same - there is no forward
+	 * progress.
+	 */
+	arch_spin_lock(&lock->raw_lock);
 }
 
 void do_raw_spin_lock(raw_spinlock_t *lock)
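
For readers skimming the hunk: the old code wrapped the trylock loop in
for (;;), so a CPU that never won the trylock race spun forever in the
debug path, and the repeated arch_spin_trylock() calls could themselves
livelock against the lock holder. The new code makes exactly one bounded
trylock pass, prints the "lockup suspected" dump (plus backtraces on SMP),
and then hands acquisition over to plain arch_spin_lock(). The standalone
userspace C sketch below mirrors that control flow; the toy_* names, the
C11 atomic_flag lock, and the loop bound are illustrative assumptions,
not kernel code.

/*
 * Userspace sketch of the post-patch control flow: one bounded
 * trylock pass, a single warning, then an unconditional blocking
 * acquire.  All toy_* names are hypothetical stand-ins.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_flag flag;
} toy_spinlock_t;

/* Stand-in for arch_spin_trylock(): nonzero if the lock was taken. */
static int toy_trylock(toy_spinlock_t *lock)
{
	return !atomic_flag_test_and_set_explicit(&lock->flag,
						  memory_order_acquire);
}

/* Stand-in for arch_spin_lock(): spins unconditionally until acquired. */
static void toy_lock_blocking(toy_spinlock_t *lock)
{
	while (atomic_flag_test_and_set_explicit(&lock->flag,
						 memory_order_acquire))
		;
}

static void toy_unlock(toy_spinlock_t *lock)
{
	atomic_flag_clear_explicit(&lock->flag, memory_order_release);
}

/* Mirrors __spin_lock_debug() after the patch. */
static void toy_lock_debug(toy_spinlock_t *lock)
{
	unsigned long i;
	unsigned long loops = 1000000;	/* stands in for loops_per_jiffy * HZ */

	for (i = 0; i < loops; i++) {
		if (toy_trylock(lock))
			return;
		/* the kernel calls __delay(1) here */
	}

	/* lockup suspected: warn once, then defer to the blocking acquire */
	fprintf(stderr, "toy_spinlock: lockup suspected\n");
	toy_lock_blocking(lock);
}

int main(void)
{
	toy_spinlock_t lock = { ATOMIC_FLAG_INIT };

	toy_lock_debug(&lock);	/* uncontended: first trylock succeeds */
	toy_unlock(&lock);
	return 0;
}

The design point the in-code comment makes is that the debug wrapper only
adds diagnostics: once the warning and backtraces are out, forward progress
(or the lack of it) is delegated to the same arch_spin_lock() primitive a
non-debug kernel would use, so the debug build can no longer livelock where
the production build would have made progress.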