path: root/arch/x86/include/asm
author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-11-08 15:28:05 -0500
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-11-08 15:28:05 -0500
commit    bad97817dece759dd6c0b24f862b7d0ed588edda (patch)
tree      fcabaa3ae3e2f17236135e60dc875f47e852cc6d /arch/x86/include/asm
parent    6fe19278ffebdd57e5c5ec10275e6d423404364e (diff)
parent    61e6cfa80de5760bbe406f4e815b7739205754d2 (diff)
Merge tag 'v3.12-rc5' into stable/for-linus-3.13
Linux 3.12-rc5

Because the Stefano branch (for SWIOTLB ARM changes) is based on that.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

* tag 'v3.12-rc5': (550 commits)
  Linux 3.12-rc5
  watchdog: sunxi: Fix section mismatch
  watchdog: kempld_wdt: Fix bit mask definition
  watchdog: ts72xx_wdt: locking bug in ioctl
  ARM: exynos: dts: Update 5250 arch timer node with clock frequency
  parisc: let probe_kernel_read() capture access to page zero
  parisc: optimize variable initialization in do_page_fault
  parisc: fix interruption handler to respect pagefault_disable()
  parisc: mark parisc_terminate() noreturn and cold.
  parisc: remove unused syscall_ipi() function.
  parisc: kill SMP single function call interrupt
  parisc: Export flush_cache_page() (needed by lustre)
  vfs: allow O_PATH file descriptors for fstatfs()
  ext4: fix memory leak in xattr
  ARC: Ignore ptrace SETREGSET request for synthetic register "stop_pc"
  ALSA: hda - Sony VAIO Pro 13 (haswell) now has a working headset jack
  ALSA: hda - Add a headset mic model for ALC269 and friends
  ALSA: hda - Fix microphone for Sony VAIO Pro 13 (Haswell model)
  compiler/gcc4: Add quirk for 'asm goto' miscompilation bug
  Revert "i915: Update VGA arbiter support for newer devices"
  ...
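Editor's note: the diff below converts every "asm goto" / "asm volatile goto" user in these x86 headers to the asm_volatile_goto() wrapper introduced by the "compiler/gcc4: Add quirk for 'asm goto' miscompilation bug" entry in the shortlog above. As a sketch only (assuming the include/linux/compiler-gcc4.h form of that quirk), the workaround appends an empty asm statement after the asm goto so affected GCC 4.x releases do not miscompile the construct:

/* Sketch of the gcc4 quirk, assuming the compiler-gcc4.h definition:
 * the trailing empty asm("") acts as a barrier that keeps buggy GCC 4.x
 * optimizers from miscompiling "asm goto" blocks. */
#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)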
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/cpufeature.h   6
-rw-r--r--  arch/x86/include/asm/jump_label.h   2
-rw-r--r--  arch/x86/include/asm/mutex_64.h     4
3 files changed, 6 insertions, 6 deletions
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d..89270b4318d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
* Catch too early usage of this before alternatives
* have run.
*/
- asm goto("1: jmp %l[t_warn]\n"
+ asm_volatile_goto("1: jmp %l[t_warn]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
#endif
- asm goto("1: jmp %l[t_no]\n"
+ asm_volatile_goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
* have. Thus, we force the jump to the widest, 4-byte, signed relative
* offset even though the last would often fit in less bytes.
*/
- asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+ asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800..6a2cefb4395 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:"
+ asm_volatile_goto("1:"
".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
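Editor's note (context only): arch_static_branch() is what static_key_false() expands to on x86 in this era of the kernel. A hypothetical user of the 3.12-era static-key API looks roughly like the sketch below; do_trace() is an invented placeholder:

/* Hypothetical static-key user sketch (3.12-era API) -- not part of this diff. */
#include <linux/jump_label.h>

static struct static_key tracing_key = STATIC_KEY_INIT_FALSE;

void hot_path(void)
{
	/* Compiles to the NOP emitted by arch_static_branch(); patched to a
	 * jump once the key is enabled via static_key_slow_inc(). */
	if (static_key_false(&tracing_key))
		do_trace();			/* hypothetical slow path */
}

void enable_tracing(void)
{
	static_key_slow_inc(&tracing_key);	/* flips the patched branch */
}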
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648e..07537a44216 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
static inline void __mutex_fastpath_lock(atomic_t *v,
void (*fail_fn)(atomic_t *))
{
- asm volatile goto(LOCK_PREFIX " decl %0\n"
+ asm_volatile_goto(LOCK_PREFIX " decl %0\n"
" jns %l[exit]\n"
: : "m" (v->counter)
: "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
static inline void __mutex_fastpath_unlock(atomic_t *v,
void (*fail_fn)(atomic_t *))
{
- asm volatile goto(LOCK_PREFIX " incl %0\n"
+ asm_volatile_goto(LOCK_PREFIX " incl %0\n"
" jg %l[exit]\n"
: : "m" (v->counter)
: "memory", "cc"