author		David Woodhouse <dwmw2@infradead.org>	2007-10-13 14:58:23 +0100
committer	David Woodhouse <dwmw2@infradead.org>	2007-10-13 14:58:23 +0100
commit		ebf8889bd1fe3615991ff4494635d237280652a2 (patch)
tree		10fb735717122bbb86474339eac07f26e7ccdf40 /include/asm-x86/agp_32.h
parent		b160292cc216a50fd0cd386b0bda2cd48352c73b (diff)
parent		752097cec53eea111d087c545179b421e2bde98a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/asm-x86/agp_32.h')
-rw-r--r--	include/asm-x86/agp_32.h	36
1 file changed, 36 insertions, 0 deletions
diff --git a/include/asm-x86/agp_32.h b/include/asm-x86/agp_32.h
new file mode 100644
index 00000000000..6af173dbf12
--- /dev/null
+++ b/include/asm-x86/agp_32.h
@@ -0,0 +1,36 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Functions to keep the agpgart mappings coherent with the MMU.
+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
+ * mapped uncacheable. Make sure there are no conflicting mappings
+ * with different cacheability attributes for the same page. This avoids
+ * data corruption on some CPUs.
+ */
+
+/* It is the caller's responsibility to call global_flush_tlb() afterwards;
+ * the flush is left to the caller so that it can be batched for performance. */
+#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
+#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+#define flush_agp_mappings() global_flush_tlb()
+
+/* Could use CLFLUSH here if the CPU supports it, but it would have to be
+ * issued for every cacheline of the page, and it needs a virtual address
+ * to operate on, so it may not be worth it. */
+#define flush_agp_cache() wbinvd()
+
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+#endif
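
Usage note (illustrative, not part of the patch): a minimal sketch of how a GART backend is expected to use these macros, assuming hypothetical helpers named example_gart_bind() and example_gart_unbind() and a caller-supplied page array. The point of the header comment is that the TLB flush is batched: remap every page first, then call flush_agp_mappings() once.

#include <linux/mm.h>
#include <asm/agp.h>

/* Hypothetical example, not taken from the kernel tree. */
static int example_gart_bind(struct page **pages, int nr_pages)
{
	int i;

	/* Remap each backing page as uncacheable for the GART alias. */
	for (i = 0; i < nr_pages; i++)
		map_page_into_agp(pages[i]);

	/*
	 * One global TLB flush for the whole batch; flushing per page is
	 * what the "for performance reasons" comment warns against.
	 */
	flush_agp_mappings();

	return 0;
}

static void example_gart_unbind(struct page **pages, int nr_pages)
{
	int i;

	/* Restore the normal cacheable kernel mapping... */
	for (i = 0; i < nr_pages; i++)
		unmap_page_from_agp(pages[i]);

	/* ...and again flush the TLB once for the whole batch. */
	flush_agp_mappings();
}

The GATT table itself would typically be obtained with alloc_gatt_pages(order) and released with free_gatt_pages(table, order); on 32-bit x86 the GART sees plain physical addresses, which is why phys_to_gart() and gart_to_phys() are identity macros here.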