author	Ravikiran G Thirumalai <kiran@scalex86.org>	2006-01-17 07:03:47 +0100
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-16 23:18:35 -0800
commit	2ddb55f091a9c74a297d72b50f8310c0c8ed7d1d (patch)
tree	c2389badfdb028dc30bbd0948c16ff25e2a77c27 /include/asm-x86_64
parent	c09b42404d29c8a9266f8186632330dc8474bf2e (diff)
[PATCH] x86_64: Fix VSMP build
Patch fixes a build problem with CONFIG_X86_VSMP. The vSMP bits probably gathered some fuzz on their way to mainline: safe_halt(), which used to live outside the #endif (CONFIG_X86_VSMP), somehow ended up inside the !CONFIG_X86_VSMP branch, leaving it undefined and breaking CONFIG_X86_VSMP builds. The patch moves the safe_halt() and halt() macros back out of the #endif.

Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
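To make the placement issue easier to see, here is a minimal sketch of the intended layout of include/asm-x86_64/system.h after the patch. It is simplified: the CONFIG_X86_VSMP branch and most of the !VSMP branch are elided, and only the macro placement relative to the #endif is shown.

	/* Simplified sketch of the header layout after this patch; the VSMP
	 * branch is elided and only macro placement is shown. */
	#ifdef CONFIG_X86_VSMP
	/* ... VSMP flavours of the local_irq_*() macros ... */
	#else
	#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
	#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
	/* ... irqs_disabled(), local_irq_save(), ... */
	#endif	/* CONFIG_X86_VSMP */

	/* Before the fix, the next two macros sat above the #endif, inside the
	 * !CONFIG_X86_VSMP branch, leaving safe_halt() undefined for VSMP builds. */
	#define safe_halt()	__asm__ __volatile__("sti; hlt": : :"memory")
	#define halt()		__asm__ __volatile__("hlt": : :"memory")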
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--	include/asm-x86_64/system.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 0eacbefb7dd..a73f0c789d8 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -354,11 +354,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt() __asm__ __volatile__("hlt": : :"memory")
-
#define irqs_disabled() \
({ \
unsigned long flags; \
@@ -370,6 +365,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#endif
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
+
void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
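As a usage note (not part of the patch): safe_halt() exists because "sti" only allows interrupt delivery after the following instruction, so "sti; hlt" closes the window in which a wakeup interrupt could otherwise arrive between re-enabling interrupts and halting. Below is a self-contained sketch of that idle-loop pattern, with a hypothetical work_pending flag standing in for the kernel's real need_resched() check; cli/sti/hlt are privileged, so this compiles for x86-64 but can only actually execute in ring 0.

	/* Sketch only: the macros from the diff above, wrapped in a toy idle loop. */
	#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
	#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
	#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")

	static volatile int work_pending;	/* hypothetical wakeup condition */

	static void idle_loop_sketch(void)
	{
		for (;;) {
			local_irq_disable();
			if (work_pending) {
				local_irq_enable();
				break;		/* go handle the work */
			}
			/* sti;hlt as one unit: a wakeup interrupt ends the hlt
			 * instead of slipping in before it and being lost */
			safe_halt();
		}
	}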