Diffstat (limited to 'include/asm-i386/system.h')
-rw-r--r--  include/asm-i386/system.h  |  89
1 file changed, 55 insertions, 34 deletions
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 0249f912a29..a6dabbcd6e6 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -11,9 +11,14 @@
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+/*
+ * Saving eflags is important. It switches not only IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
#define switch_to(prev,next,last) do { \
unsigned long esi,edi; \
- asm volatile("pushl %%ebp\n\t" \
+ asm volatile("pushfl\n\t" /* Save flags */ \
+ "pushl %%ebp\n\t" \
"movl %%esp,%0\n\t" /* save ESP */ \
"movl %5,%%esp\n\t" /* restore ESP */ \
"movl $1f,%1\n\t" /* save EIP */ \
@@ -21,6 +26,7 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
"jmp __switch_to\n" \
"1:\t" \
"popl %%ebp\n\t" \
+ "popfl" \
:"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
"=a" (last),"=S" (esi),"=D" (edi) \
:"m" (next->thread.esp),"m" (next->thread.eip), \
@@ -82,10 +88,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
#define savesegment(seg, value) \
asm volatile("mov %%" #seg ",%0":"=rm" (value))
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
unsigned int __dummy; \
__asm__ __volatile__( \
@@ -94,7 +96,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
__dummy; \
})
#define write_cr0(x) \
- __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
#define read_cr2() ({ \
unsigned int __dummy; \
@@ -104,7 +106,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
__dummy; \
})
#define write_cr2(x) \
- __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
#define read_cr3() ({ \
unsigned int __dummy; \
@@ -114,7 +116,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
__dummy; \
})
#define write_cr3(x) \
- __asm__ __volatile__("movl %0,%%cr3": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr3": :"r" (x))
#define read_cr4() ({ \
unsigned int __dummy; \
@@ -123,7 +125,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
:"=r" (__dummy)); \
__dummy; \
})
-
#define read_cr4_safe() ({ \
unsigned int __dummy; \
/* This could fault if %cr4 does not exist */ \
@@ -135,15 +136,19 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
: "=r" (__dummy): "0" (0)); \
__dummy; \
})
-
#define write_cr4(x) \
- __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
#define stts() write_cr0(8 | read_cr0())
#endif /* __KERNEL__ */
#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory");
+ __asm__ __volatile__ ("wbinvd": : :"memory")
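The relocated clts()/stts() pair above works because TS is bit 3 of CR0, which is why stts() ORs in the literal 8. A minimal sketch of the usual lazy-FPU bracketing, assuming hypothetical helper names (illustrative only, not from this patch):

static inline void fpu_use_begin_sketch(void)
{
	clts();		/* clear CR0.TS: FPU instructions no longer fault */
}

static inline void fpu_use_end_sketch(void)
{
	stts();		/* set CR0.TS (bit 3) again: next FPU use traps to the kernel */
}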
static inline unsigned long get_limit(unsigned long segment)
{
@@ -262,6 +267,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
#define cmpxchg(ptr,o,n)\
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
(unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
#endif
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -291,6 +299,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return old;
}
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
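A minimal usage sketch for the new primitive, assuming a hypothetical field in a page shared with a hypervisor (the name shared_flags and the helper are illustrative only, not part of this patch); per the comment above, the unconditional LOCK prefix keeps the update atomic even when a uniprocessor guest kernel runs on an SMP host:

static inline int claim_shared_slot_sketch(volatile unsigned long *shared_flags,
					   unsigned long old, unsigned long new)
{
	/* succeeds only if neither the host nor another vCPU raced us */
	return sync_cmpxchg(shared_flags, old, new) == old;
}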
#ifndef CONFIG_X86_CMPXCHG
/*
* Building a kernel capable running on 80386. It may be necessary to
@@ -427,7 +468,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while(0)
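A minimal sketch of the case the comment tail above describes, reusing its variable names (the reader split into a separate function is illustrative only): the two loads carry no data dependency, so without the rmb() a CPU such as Alpha may reorder them.

extern int a, b;
int x, y;

static void ordered_reader_sketch(void)
{
	y = b;		/* load b first ...                              */
	rmb();		/* ... and force the load of a to happen after  */
	x = a;
}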
@@ -454,27 +495,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-/* interrupt control.. */
-#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
-#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
-#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt() __asm__ __volatile__("hlt": : :"memory")
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- !(flags & (1<<9)); \
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+#include <linux/irqflags.h>
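The interrupt-control interface itself is unchanged by this hunk; the open-coded pushfl/popl, cli and sti macros simply move to <linux/irqflags.h>. A minimal usage sketch (the helper name and counter are illustrative only, not part of this patch):

static void bump_counter_sketch(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* save EFLAGS (including IF), then cli   */
	(*counter)++;			/* critical section runs with IRQs off    */
	local_irq_restore(flags);	/* popfl restores the saved EFLAGS state  */
}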
/*
* disable hlt during certain critical i/o operations