path: root/arch/x86/um/asm/barrier.h
author     Marcelo Tosatti <mtosatti@redhat.com>  2012-04-19 17:06:26 -0300
committer  Marcelo Tosatti <mtosatti@redhat.com>  2012-04-19 17:06:26 -0300
commit     eac0556750e727ff39144a9a9e59d5ccf1fc0e2a (patch)
tree       f5ccff7795b2ad5e47f17fb475599c526f533e79 /arch/x86/um/asm/barrier.h
parent     f71fa31f9f7ac33cba12b8897983f950ad2c7a5b (diff)
parent     19853301ef3289bda2d5264c1093e74efddaeab9 (diff)
Merge branch 'linus' into queue
Merge reason: development work has dependency on kvm patches merged upstream.

Conflicts:
	Documentation/feature-removal-schedule.txt

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/um/asm/barrier.h')
-rw-r--r--  arch/x86/um/asm/barrier.h  |  75
1 file changed, 75 insertions(+), 0 deletions(-)
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 00000000000..7d01b8c56c0
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb() asm volatile("mfence" : : : "memory")
+#define rmb() asm volatile("lfence" : : : "memory")
+#define wmb() asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb() rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb() barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb() wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb() barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+ alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+ alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
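
The smp_wmb()/smp_rmb() pairing added above is easiest to see in a small producer/consumer example. The following is a minimal userspace sketch (not part of this patch; the file name, build command and all identifiers are made up for illustration) that open-codes the same x86 fences the header maps those macros to:

/* barrier_demo.c - illustrative only; mirrors the wmb()/rmb() pairing
 * from arch/x86/um/asm/barrier.h with raw x86 inline assembly.
 * Assumed build command: gcc -O2 -pthread barrier_demo.c -o barrier_demo
 */
#include <pthread.h>
#include <stdio.h>

#define barrier()  __asm__ __volatile__("" : : : "memory")       /* compiler-only fence */
#define wmb()      __asm__ __volatile__("sfence" : : : "memory")
#define rmb()      __asm__ __volatile__("lfence" : : : "memory")

static volatile int payload;
static volatile int ready;

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;   /* write the data first */
	wmb();          /* order the data store before the flag store */
	ready = 1;      /* then publish the flag */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!ready)
		barrier();  /* compiler barrier while spinning on the flag */
	rmb();              /* order the flag load before the data load */
	printf("payload = %d\n", payload);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

On x86, store/store and load/load ordering is already guaranteed by the hardware in the common case, which is why the header can reduce smp_rmb() and smp_wmb() to barrier() unless CONFIG_X86_PPRO_FENCE or CONFIG_X86_OOSTORE is set.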
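
Likewise, rdtsc_barrier() exists because RDTSC is not a serializing instruction, so without a fence the timestamp read can be speculated into or out of the region being measured. A hypothetical userspace sketch of the same idea (build command and names are assumptions, not taken from the patch):

/* rdtsc_demo.c - illustrative only; shows why a fence is placed next to
 * RDTSC when timing a code region, as rdtsc_barrier() does above.
 * Assumed build command: gcc -O2 rdtsc_demo.c -o rdtsc_demo
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdtsc_ordered(void)
{
	uint32_t lo, hi;

	/* Keep earlier instructions from being speculated past the TSC read,
	 * mirroring what rdtsc_barrier() achieves with alternative(). */
	__asm__ __volatile__("lfence" : : : "memory");
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	volatile unsigned long sum = 0;
	uint64_t start, end;
	unsigned long i;

	start = rdtsc_ordered();
	for (i = 0; i < 1000000; i++)
		sum += i;                 /* the region being timed */
	end = rdtsc_ordered();

	printf("cycles: %llu (sum=%lu)\n",
	       (unsigned long long)(end - start), (unsigned long)sum);
	return 0;
}

In the header the fence is chosen at runtime via alternative() on X86_FEATURE_MFENCE_RDTSC and X86_FEATURE_LFENCE_RDTSC; the sketch hard-codes lfence for brevity.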