From 77ef50a522717fa040636ee1017179ceba12ff62 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Wed, 18 Jun 2008 17:08:48 +0200
Subject: x86: consolidate header guards

This patch is the result of an automatic script that consolidates the
format of all the headers in include/asm-x86/.

The format:

1. No leading underscore. Names with leading underscores are reserved.
2. Pathname components are separated by two underscores. So we can
   distinguish between mm_types.h and mm/types.h.
3. Everything except letters and numbers is turned into single
   underscores.

Signed-off-by: Vegard Nossum
---
 include/asm-x86/spinlock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 4f9a9861799..cbe01086ba6 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,5 @@
-#ifndef _X86_SPINLOCK_H_
-#define _X86_SPINLOCK_H_
+#ifndef ASM_X86__SPINLOCK_H
+#define ASM_X86__SPINLOCK_H
 
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
@@ -366,4 +366,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
-#endif
+#endif /* ASM_X86__SPINLOCK_H */
--
cgit v1.2.3-70-g09d2
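To make rule 2 concrete, here is a minimal sketch with two hypothetical
headers whose guards would collide under a single-underscore scheme but
stay distinct under this one (neither file exists in the tree; the
names only illustrate the mapping):

/* include/asm-x86/mm_types.h (hypothetical) */
#ifndef ASM_X86__MM_TYPES_H
#define ASM_X86__MM_TYPES_H
/* ... declarations ... */
#endif /* ASM_X86__MM_TYPES_H */

/* include/asm-x86/mm/types.h (hypothetical) */
#ifndef ASM_X86__MM__TYPES_H
#define ASM_X86__MM__TYPES_H
/* ... declarations ... */
#endif /* ASM_X86__MM__TYPES_H */

The double underscore marks the directory boundary, so mm_types.h and
mm/types.h map to different guard macros.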
From 5bbd4c3724008c93cf3efdfc38a3402e245ab506 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Fri, 15 Aug 2008 12:56:59 -0400
Subject: x86: spinlock use LOCK_PREFIX

Since we are now using DS prefixes instead of NOP to remove LOCK
prefixes, there are no longer any problems with instruction boundaries
moving around.

* Linus Torvalds (torvalds@linux-foundation.org) wrote:
>
> On Thu, 14 Aug 2008, Mathieu Desnoyers wrote:
> >
> > Changing the 0x90 (single-byte nop) currently used into a 0x3E DS
> > segment override prefix should fix this issue. Since the default of
> > the atomic instructions is to use the DS segment anyway, it should
> > not affect the behavior.
>
> Ok, so I think this is an _excellent_ patch, but I'd like to also then
> use LOCK_PREFIX in include/asm-x86/futex.h.
>
> See commit 9d55b9923a1b7ea8193b8875c57ec940dc2ff027.
>
>	Linus

Unless there is a rationale for this, I think these should be changed
to LOCK_PREFIX too:

grep "lock ;" include/asm-x86/spinlock.h
		     "lock ; cmpxchgw %w1,%2\n\t"
	asm volatile("lock ; xaddl %0, %1\n"
		     "lock ; cmpxchgl %1,%2\n\t"

Applies to 2.6.27-rc2.

Signed-off-by: Mathieu Desnoyers
Acked-by: Linus Torvalds
CC: Linus Torvalds
CC: H. Peter Anvin
CC: Jeremy Fitzhardinge
CC: Roland McGrath
CC: Ingo Molnar
CC: Steven Rostedt
CC: Thomas Gleixner
CC: Peter Zijlstra
CC: Andrew Morton
CC: David Miller
CC: Ulrich Drepper
CC: Rusty Russell
CC: Gregory Haskins
CC: Arnaldo Carvalho de Melo
CC: "Luis Claudio R. Goncalves"
CC: Clark Williams
CC: Christoph Lameter
CC: Andi Kleen
CC: Harvey Harrison
Signed-off-by: H. Peter Anvin
---
 include/asm-x86/spinlock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 4f9a9861799..0b4d59a93d2 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -97,7 +97,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "jne 1f\n\t"
 		     "movw %w0,%w1\n\t"
 		     "incb %h1\n\t"
-		     "lock ; cmpxchgw %w1,%2\n\t"
+		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
@@ -135,7 +135,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	int inc = 0x00010000;
 	int tmp;
 
-	asm volatile("lock ; xaddl %0, %1\n"
+	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
 		     "movzwl %w0, %2\n\t"
 		     "shrl $16, %0\n\t"
 		     "1:\t"
@@ -162,7 +162,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "cmpl %0,%1\n\t"
 		     "jne 1f\n\t"
 		     "addl $0x00010000, %1\n\t"
-		     "lock ; cmpxchgl %1,%2\n\t"
+		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
--
cgit v1.2.3-70-g09d2
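For reference, LOCK_PREFIX lives in include/asm-x86/alternative.h. A
simplified sketch of the mechanism the discussion above relies on (the
real macro additionally records each call site in a .smp_locks section
so the boot code can find and patch the prefix byte; this sketch omits
that bookkeeping):

#ifdef CONFIG_SMP
/* Emit the lock prefix; the real macro also records this site
 * in .smp_locks for runtime patching. */
#define LOCK_PREFIX "lock; "
#else
/* UP build: atomicity against other CPUs is not needed. */
#define LOCK_PREFIX ""
#endif

When an SMP kernel boots on a single CPU, each recorded 0xF0 (lock)
prefix byte is overwritten. Replacing it with 0x3E (a DS segment
override, which is the default segment for these operands anyway)
instead of 0x90 (NOP) keeps the prefix and its instruction a single
instruction, so no instruction boundary appears or moves when the byte
is patched back for CPU hotplug. A hardcoded "lock ; " string, by
contrast, is invisible to this machinery, which is why the three sites
found by grep should use LOCK_PREFIX.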
From 63d3a75d6f1fcf2f33e6abbe84e1f428c3586152 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Tue, 19 Aug 2008 13:19:36 -0700
Subject: x86/paravirt: add spin_lock_flags lock op

It is useful for a pv_lock_ops backend to know whether interrupts are
enabled or not in the context in which a spin_lock is being called.
This allows it to enable interrupts while spinning, which could be
particularly helpful when spinning becomes blocking.

The default implementation just calls the normal spin_lock op,
ignoring the flags.

Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/paravirt-spinlocks.c | 6 ++++++
 include/asm-x86/paravirt.h           | 7 +++++++
 include/asm-x86/spinlock.h           | 9 +++++++--
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 38d7f7f1dbc..206359a8e43 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,12 +7,18 @@
 
 #include <asm/paravirt.h>
 
+static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
 	.spin_is_locked = __ticket_spin_is_locked,
 	.spin_is_contended = __ticket_spin_is_contended,
 
 	.spin_lock = __ticket_spin_lock,
+	.spin_lock_flags = default_spin_lock_flags,
 	.spin_trylock = __ticket_spin_trylock,
 	.spin_unlock = __ticket_spin_unlock,
 #endif
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index db9b0647b34..8e9b1266898 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -333,6 +333,7 @@ struct pv_lock_ops {
 	int (*spin_is_locked)(struct raw_spinlock *lock);
 	int (*spin_is_contended)(struct raw_spinlock *lock);
 	void (*spin_lock)(struct raw_spinlock *lock);
+	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
 	int (*spin_trylock)(struct raw_spinlock *lock);
 	void (*spin_unlock)(struct raw_spinlock *lock);
 };
@@ -1414,6 +1415,12 @@ static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
+static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+						  unsigned long flags)
+{
+	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
+}
+
 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e39c790dbfd..b755ea86367 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -182,8 +182,6 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
 #ifdef CONFIG_PARAVIRT
 /*
  * Define virtualization-friendly old-style lock byte lock, for use in
@@ -272,6 +270,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+						  unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
 #endif	/* CONFIG_PARAVIRT */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
--
cgit v1.2.3-70-g09d2

From ef1f3413284b9270266cb04a944647e59735f0f1 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 5 Sep 2008 13:26:39 +0100
Subject: x86: ticket spin locks: fix asm constraints

In addition to these changes, I doubt the 'volatile' on all the ticket
lock asm()-s is really necessary.

Signed-off-by: Jan Beulich
Signed-off-by: Ingo Molnar
---
 include/asm-x86/spinlock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 93adae338ac..acd9bdda55c 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -101,7 +101,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&Q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
@@ -146,7 +146,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     /* don't need lfence here, because loads are in-order */
 		     "jmp 1b\n"
 		     "2:"
-		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
 		     :
 		     : "memory", "cc");
 }
@@ -166,7 +166,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
--
cgit v1.2.3-70-g09d2
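The added '&' is GCC's earlyclobber modifier: it tells the register
allocator that the output is written before the asm has finished using
its other operands, so the output must not share a register with them.
A minimal standalone illustration of the bug class (hypothetical code,
not from the kernel):

/* Computes 1 + 2*in.  Without '&' on the output, GCC may put 'out'
 * and 'in' in the same register, and the first movl would destroy
 * 'in' before the addls read it. */
static inline int one_plus_twice(int in)
{
	int out;

	asm("movl $1, %0\n\t"	/* writes %0 early...           */
	    "addl %1, %0\n\t"	/* ...but still needs to read %1 */
	    "addl %1, %0"
	    : "=&r" (out)	/* earlyclobber: keep %0 != %1   */
	    : "r" (in));
	return out;
}

In the trylock routines above, 'new' is written (movw/incb or addl)
before the cmpxchg last references the lock word; without '&', 'new'
could be allocated to a register that also feeds the memory operand's
address, which is exactly this pattern.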
From 08f5fcbe6e0ea029c7e9b1b1c338700ab7809daf Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 5 Sep 2008 13:26:39 +0100
Subject: x86: ticket spin locks: factor out more common code

Signed-off-by: Jan Beulich
Signed-off-by: Ingo Molnar
---
 include/asm-x86/spinlock.h | 42 ++++++++++++++++--------------------------
 1 file changed, 16 insertions(+), 26 deletions(-)

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index acd9bdda55c..63d3b610a5e 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -54,19 +54,7 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 8) - tmp) & 0xff) > 1;
-}
+#define TICKET_SHIFT 8
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
@@ -116,19 +104,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 16) - tmp) & 0xffff) > 1;
-}
+#define TICKET_SHIFT 16
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
@@ -182,6 +158,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+	int tmp = ACCESS_ONCE(lock->slock);
+
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+	int tmp = ACCESS_ONCE(lock->slock);
+
+	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+}
+
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
 #ifdef CONFIG_PARAVIRT
--
cgit v1.2.3-70-g09d2

From 74e91604b2452c15bbe72d77b37cf47ed0310d13 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 5 Sep 2008 13:27:45 +0100
Subject: x86: ticket spin locks: reduce instruction dependencies

Reduce the number of partial register accesses in the NR_CPUS < 256
case, and slightly weaken resource dependencies in the other case.

Signed-off-by: Jan Beulich
Signed-off-by: Ingo Molnar
---
 include/asm-x86/spinlock.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 63d3b610a5e..b5a4551fd56 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -21,8 +21,10 @@
 
 #ifdef CONFIG_X86_32
 # define LOCK_PTR_REG "a"
+# define REG_PTR_MODE "k"
 #else
 # define LOCK_PTR_REG "D"
+# define REG_PTR_MODE "q"
 #endif
 
 #if defined(CONFIG_X86_32) && \
@@ -77,19 +79,17 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	int tmp;
-	short new;
+	int tmp, new;
 
-	asm volatile("movw %2,%w0\n\t"
+	asm volatile("movzwl %2, %0\n\t"
 		     "cmpb %h0,%b0\n\t"
+		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
 		     "jne 1f\n\t"
-		     "movw %w0,%w1\n\t"
-		     "incb %h1\n\t"
 		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&Q" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
@@ -136,8 +136,8 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "movl %0,%1\n\t"
 		     "roll $16, %0\n\t"
 		     "cmpl %0,%1\n\t"
+		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
 		     "jne 1f\n\t"
-		     "addl $0x00010000, %1\n\t"
 		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
--
cgit v1.2.3-70-g09d2
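As a cross-check of the predicates consolidated two patches back, the
same head/tail logic as a plain C model (a standalone sketch, not
kernel code): the low TICKET_SHIFT bits of the lock word hold the
ticket now being served, and the next TICKET_SHIFT bits hold the next
ticket to hand out.

#define TICKET_SHIFT 8				/* NR_CPUS < 256 case */
#define TICKET_MASK  ((1 << TICKET_SHIFT) - 1)

/* Held while the serving ticket and the next free ticket differ. */
static int ticket_is_locked(int slock)
{
	return ((slock >> TICKET_SHIFT) ^ slock) & TICKET_MASK;
}

/* Contended once the next free ticket is two or more ahead. */
static int ticket_is_contended(int slock)
{
	return (((slock >> TICKET_SHIFT) - slock) & TICKET_MASK) > 1;
}

The leal in the last patch computes new = tmp + (1 << TICKET_SHIFT) in
a single instruction: in the 8-bit case this removes the
partial-register movw/incb pair, and in the 16-bit case hoisting it
above the jne lets it issue independently of the branch.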