author     Thomas Gleixner <tglx@linutronix.de>    2008-01-30 13:34:07 +0100
committer  Ingo Molnar <mingo@elte.hu>             2008-01-30 13:34:07 +0100
commit     d7c8f21a8cad0228c7c5ce2bb6dbd95d1ee49d13 (patch)
tree       d1e305bec62022a0bec82a3499a372c2c7c40583 /include/asm-x86
parent     d1028a154c65d7fadd1b2d0276c077014d401ec7 (diff)
x86: cpa: move flush to cpa
The set_memory_* and set_pages_* family of APIs currently requires the
callers to do a global TLB flush after the function call; forgetting this
is a very nasty deathtrap. This patch moves the global TLB flush into the
cpa code itself, so the callers no longer have to issue it.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
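To illustrate the change, here is a minimal caller-side sketch; the wrapper
functions and their names are hypothetical, while set_pages_uc() and
global_flush_tlb() are the interfaces the patch touches:

#include <linux/mm_types.h>	/* struct page */
#include <asm/cacheflush.h>	/* set_pages_uc(), global_flush_tlb() */

/* Hypothetical caller before this patch: the attribute change had to be
 * followed by a global TLB flush, and forgetting it was the "deathtrap". */
static void make_page_uncached_before(struct page *page)
{
	set_pages_uc(page, 1);
	global_flush_tlb();
}

/* Hypothetical caller after this patch: the cpa code flushes internally,
 * so the call site is just the attribute change. */
static void make_page_uncached_after(struct page *page)
{
	set_pages_uc(page, 1);
}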
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/agp.h          6
-rw-r--r--  include/asm-x86/cacheflush.h   1
2 files changed, 1 insertion(+), 6 deletions(-)
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index f6df7256183..0c309b9a521 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -12,13 +12,9 @@
* page. This avoids data corruption on some CPUs.
*/
-/*
- * Caller's responsibility to call global_flush_tlb() for performance
- * reasons
- */
#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)
-#define flush_agp_mappings() global_flush_tlb()
+#define flush_agp_mappings() do { } while (0)
/*
* Could use CLFLUSH here if the cpu supports it. But then it would
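For context on the agp.h hunk above, a hedged sketch of how an AGP backend
would use these macros (the surrounding function is hypothetical): the call
sequence at the call site stays the same, but flush_agp_mappings() is now an
empty statement because set_pages_uc()/set_pages_wb() flush the TLB themselves.

#include <linux/mm_types.h>	/* struct page */
#include <asm/agp.h>		/* map_page_into_agp(), flush_agp_mappings() */

/* Hypothetical AGP backend path: unchanged at the call site, but after
 * this patch flush_agp_mappings() expands to do { } while (0), since the
 * TLB flush already happened inside set_pages_uc(). */
static void agp_map_one_page(struct page *page)
{
	map_page_into_agp(page);	/* -> set_pages_uc(page, 1) */
	/* ... insert the page into the GART here ... */
	flush_agp_mappings();		/* now a no-op */
}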
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index d15ff359d3e..157da0206cc 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -24,7 +24,6 @@
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
-void global_flush_tlb(void);
int __deprecated_for_modules change_page_attr(struct page *page, int numpages,
pgprot_t prot);