author		Andi Kleen <ak@suse.de>	2006-03-25 16:31:31 +0100
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-25 09:14:38 -0800
commit		ba22f13563de5773701fc318ccaaa37b1fb6d294 (patch)
tree		e29ddeaca4c9f7b1b5ef08804980600d0e089f5b /include/asm-x86_64
parent		9d95dd849ccc43c4b21504e1829b5bed68cdb1bc (diff)
[PATCH] x86_64: Remove CONFIG_UNORDERED_IO
It was a failed experiment - all benchmarks done with it on both AMD and Intel showed it was a loss. That was probably because the store buffers of the CPUs for write combining traffic weren't large enough.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
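For context, the helpers removed below used movnti, a non-temporal store that bypasses x86's normal strong store ordering, so a later store could become visible before the MMIO write unless the caller fenced explicitly. A minimal userspace sketch of that pattern (the fake_mmio buffer is a hypothetical stand-in for an MMIO window, not kernel API) could look like this:

/*
 * Userspace illustration (not kernel code) of the store pattern the removed
 * CONFIG_UNORDERED_IO path used: _mm_stream_si32 compiles to movnti, a
 * weakly ordered non-temporal store, so ordering against a later store
 * needs an explicit sfence.
 */
#include <emmintrin.h>	/* _mm_stream_si32, _mm_sfence (SSE2) */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio[2];	/* hypothetical stand-in for a device register window */

int main(void)
{
	/* Weakly ordered store, same instruction (movnti) as the removed __writel. */
	_mm_stream_si32((int *)&fake_mmio[0], 0x12345678);

	/* Without this fence a later "doorbell" store could become visible first. */
	_mm_sfence();

	fake_mmio[1] = 1;	/* ordinary store: strongly ordered on x86 */

	printf("%#x %#x\n", fake_mmio[0], fake_mmio[1]);
	return 0;
}

With the patch applied only the plain-store variants remain; ordinary stores to uncached MMIO on x86 are already ordered with respect to each other, so no extra fencing is needed in the common case.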
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--	include/asm-x86_64/io.h	18
1 file changed, 0 insertions(+), 18 deletions(-)
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index ac12bda3bb1..cafdfb37f0d 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -200,23 +200,6 @@ static inline __u64 __readq(const volatile void __iomem *addr)
#define mmiowb()
-#ifdef CONFIG_UNORDERED_IO
-static inline void __writel(__u32 val, volatile void __iomem *addr)
-{
-	volatile __u32 __iomem *target = addr;
-	asm volatile("movnti %1,%0"
-		     : "=m" (*target)
-		     : "r" (val) : "memory");
-}
-
-static inline void __writeq(__u64 val, volatile void __iomem *addr)
-{
-	volatile __u64 __iomem *target = addr;
-	asm volatile("movnti %1,%0"
-		     : "=m" (*target)
-		     : "r" (val) : "memory");
-}
-#else
static inline void __writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)addr = b;
@@ -225,7 +208,6 @@ static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
	*(__force volatile __u64 *)addr = b;
}
-#endif
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
	*(__force volatile __u8 *)addr = b;