author    Jeremy Fitzhardinge <jeremy@goop.org>  2008-01-30 13:32:58 +0100
committer Ingo Molnar <mingo@elte.hu>            2008-01-30 13:32:58 +0100
commit    195466dc4b9b8a4cc89d37ea1211746f3afbc941 (patch)
tree      38a4dc9e105d54cf285cdcbc141b424a2fc16f41 /include/asm-x86/pgtable.h
parent    e33287013585e96180c575288bf1db22bee47b52 (diff)
x86: pgtable: unify pte accessors
Make various pte accessors common.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
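In practical terms, "unify" here means the accessors below get a single definition in the shared pgtable.h, visible to both the 32-bit and 64-bit sub-headers it includes. A minimal caller sketch, assuming normal kernel context (vma, addr and ptep coming from a page-table walk); the function itself and its name are hypothetical, not part of the patch:

/* Hypothetical caller (not from the patch): because these accessors
 * now live in the shared pgtable.h, code like this compiles unchanged
 * on 32-bit and 64-bit x86. */
static void example_age_and_wrprotect(struct vm_area_struct *vma,
                                      unsigned long addr, pte_t *ptep)
{
        /* Clears _PAGE_BIT_ACCESSED and flushes the TLB entry if it was set. */
        int was_young = ptep_clear_flush_young(vma, addr, ptep);

        /* Drop write permission; pte_update() lets a hypervisor resync. */
        ptep_set_wrprotect(vma->vm_mm, addr, ptep);

        (void)was_young;        /* illustrative; a real caller would act on it */
}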
Diffstat (limited to 'include/asm-x86/pgtable.h')
-rw-r--r--  include/asm-x86/pgtable.h  110
1 file changed, 109 insertions, 1 deletion
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 6452286e71a..62fa856c249 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -116,6 +116,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __S111 PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -169,7 +170,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return __pte(val);
}
-
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
@@ -178,4 +178,112 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
# include "pgtable_64.h"
#endif
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_PARAVIRT
+/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces. It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush. The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)            do { } while (0)
+#define pte_update_defer(mm, addr, ptep)      do { } while (0)
+#endif
+
+/* local pte updates need not use xchg for locking */
+static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
+{
+        pte_t res = *ptep;
+
+        /* Pure native function needs no input for mm, addr */
+        native_pte_clear(NULL, 0, ptep);
+        return res;
+}
+
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
+({ \
+        int __changed = !pte_same(*(ptep), entry); \
+        if (__changed && dirty) { \
+                *ptep = entry; \
+                pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+                flush_tlb_page(vma, address); \
+        } \
+        __changed; \
+})
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
+        int __ret = 0; \
+        if (pte_young(*(ptep))) \
+                __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \
+                                           &(ptep)->pte); \
+        if (__ret) \
+                pte_update((vma)->vm_mm, addr, ptep); \
+        __ret; \
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep) \
+({ \
+        int __young; \
+        __young = ptep_test_and_clear_young((vma), (address), (ptep)); \
+        if (__young) \
+                flush_tlb_page(vma, address); \
+        __young; \
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+        pte_t pte = native_ptep_get_and_clear(ptep);
+        pte_update(mm, addr, ptep);
+        return pte;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+{
+        pte_t pte;
+        if (full) {
+                /*
+                 * Full address destruction in progress; paravirt does not
+                 * care about updates and native needs no locking
+                 */
+                pte = native_local_ptep_get_and_clear(ptep);
+        } else {
+                pte = ptep_get_and_clear(mm, addr, ptep);
+        }
+        return pte;
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+        clear_bit(_PAGE_BIT_RW, &ptep->pte);
+        pte_update(mm, addr, ptep);
+}
+
+#ifndef CONFIG_PARAVIRT
+#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
+#endif /* !CONFIG_PARAVIRT */
+
+#include <asm-generic/pgtable.h>
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_X86_PGTABLE_H */
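To make the pte_update()/full-teardown contract above concrete, here is a minimal sketch of how a caller is expected to use the new helpers, again assuming kernel context. Only ptep_get_and_clear_full() and the pte_* predicates come from the patch; zap_one_pte(), the full_teardown flag, and the use of set_page_dirty() are hypothetical illustration:

/* Hypothetical caller: tear down one user PTE. With full_teardown == 1
 * (whole address space going away, e.g. during exit) the helper takes
 * the lock-free native_local_ptep_get_and_clear() path and skips the
 * hypervisor notification; with 0 it uses the atomic
 * ptep_get_and_clear() followed by pte_update(). */
static void zap_one_pte(struct mm_struct *mm, unsigned long addr,
                        pte_t *ptep, int full_teardown)
{
        pte_t old = ptep_get_and_clear_full(mm, addr, ptep, full_teardown);

        /* Propagate hardware-set dirty state before the page is freed;
         * set_page_dirty()/pte_page() are illustrative, not from the patch. */
        if (pte_present(old) && pte_dirty(old))
                set_page_dirty(pte_page(old));
}

The full/non-full split matters mainly on shadow-paging hypervisors: per the comment in the patch, pte_update() is what resynchronizes the shadow page tables, and skipping it is only safe when the mapping is about to disappear entirely.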