arch/i386/kernel/paravirt.c |  2 ++
arch/i386/kernel/vmi.c      | 19 +++++++++++++++++++
include/asm-i386/paravirt.h |  4 ++++
include/asm-i386/pgtable.h  | 23 +++++++++++++++++++----
4 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 8352394d5ef..12e3bc49b83 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -553,6 +553,8 @@ struct paravirt_ops paravirt_ops = {
.flush_tlb_kernel = native_flush_tlb_global,
.flush_tlb_single = native_flush_tlb_single,

+ .map_pt_hook = (void *)native_nop,
+
.alloc_pt = (void *)native_nop,
.alloc_pd = (void *)native_nop,
.alloc_pd_clone = (void *)native_nop,
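
A note on the (void *)native_nop default above: as with the alloc_pt/alloc_pd hooks next to it, one generic no-op is cast through void * to fill a slot whose real signature takes arguments, so a native kernel pays a single indirect call and nothing more. Below is a minimal standalone sketch of the same dispatch pattern; the typedefs and names are illustrative only, and a properly-typed stub stands in for the kernel's cast (which relies on the i386 calling convention letting a callee ignore its arguments).

#include <stdio.h>

typedef unsigned int u32;
typedef struct { u32 pte_low; } pte_t;	/* illustrative stand-in */

/* Properly-typed no-op stub.  The kernel instead reuses one generic
 * native_nop() cast through (void *), which works because the i386
 * calling convention lets a callee simply ignore its arguments. */
static void nop_map_pt_hook(int type, pte_t *va, u32 pfn)
{
	(void)type; (void)va; (void)pfn;
}

struct mmu_ops_sketch {
	void (*map_pt_hook)(int type, pte_t *va, u32 pfn);
};

static struct mmu_ops_sketch ops = {
	.map_pt_hook = nop_map_pt_hook,	/* native default: do nothing */
};

int main(void)
{
	pte_t pte = { 0 };
	ops.map_pt_hook(0, &pte, 42);	/* one indirect call, no work */
	puts("native map_pt_hook: no-op");
	return 0;
}
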
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index acdfe69fb7a..bd1037bd124 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -370,6 +370,24 @@ static void vmi_check_page_type(u32 pfn, int type)
#define vmi_check_page_type(p,t) do { } while (0)
#endif

+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+ /*
+ * Internally, the VMI ROM must map virtual addresses to physical
+ * addresses for processing MMU updates. By the time MMU updates
+ * are issued, this information is typically already lost.
+ * Fortunately, the VMI provides a cache of mapping slots for active
+ * page tables.
+ *
+ * We use slot zero for the linear mapping of physical memory, and
+ * in HIGHPTE kernels, slots 1 and 2 for KM_PTE0 and KM_PTE1.
+ *
+ * args: SLOT VA COUNT PFN
+ */
+ BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+ vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
static void vmi_allocate_pt(u32 pfn)
{
vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -813,6 +831,7 @@ static inline int __init activate_vmi(void)
vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);

+ paravirt_ops.map_pt_hook = vmi_map_pt_hook;
paravirt_ops.alloc_pt = vmi_allocate_pt;
paravirt_ops.alloc_pd = vmi_allocate_pd;
paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
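
The slot arithmetic in vmi_map_pt_hook() is easy to check by hand: slot 0 stays reserved for the linear mapping, so KM_PTE0 lands in slot 1 and KM_PTE1 in slot 2. A self-contained sketch of that computation follows; the KM_PTE0/KM_PTE1 values here are hypothetical (the real constants come from asm-i386/kmap_types.h), and only their adjacency matters.

#include <assert.h>
#include <stdio.h>

/* Hypothetical values; only their adjacency matters here.  The real
 * constants live in include/asm-i386/kmap_types.h. */
enum { KM_PTE0 = 4, KM_PTE1 = 5 };

/* Mirrors (type - KM_PTE0) + 1 from vmi_map_pt_hook(): slot 0 is
 * reserved for the linear mapping of physical memory, so the two
 * atomic kmap windows for page tables land in slots 1 and 2. */
static int pt_hook_slot(int type)
{
	assert(type == KM_PTE0 || type == KM_PTE1);	/* cf. BUG_ON() */
	return (type - KM_PTE0) + 1;
}

int main(void)
{
	printf("KM_PTE0 -> slot %d\n", pt_hook_slot(KM_PTE0));	/* 1 */
	printf("KM_PTE1 -> slot %d\n", pt_hook_slot(KM_PTE1));	/* 2 */
	return 0;
}
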
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index a35c8148065..e01d895d737 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -131,6 +131,8 @@ struct paravirt_ops
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(u32 addr);

+ void (fastcall *map_pt_hook)(int type, pte_t *va, u32 pfn);
+
void (*alloc_pt)(u32 pfn);
void (*alloc_pd)(u32 pfn);
void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
@@ -354,6 +356,8 @@ static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)

+#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn)
+
#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
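
Taken together with the pgtable.h hunk below, this is the usual two-definition pattern for paravirt hooks: under CONFIG_PARAVIRT the macro forwards to the op table, otherwise it expands to nothing. A condensed sketch of how the two hunks fit together (the #ifdef scaffolding is implied by which file defines the macro, not visible in the diff itself):

#ifdef CONFIG_PARAVIRT		/* include/asm-i386/paravirt.h */
#define paravirt_map_pt_hook(type, va, pfn) \
	paravirt_ops.map_pt_hook(type, va, pfn)
#else				/* include/asm-i386/pgtable.h */
/* do { } while (0) keeps the empty expansion statement-safe,
 * e.g. inside an unbraced if/else. */
#define paravirt_map_pt_hook(type, va, pfn) do { } while (0)
#endif
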
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index e6a4723f0eb..c3b58d473a5 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -263,6 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
+#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
#endif

/*
@@ -469,10 +470,24 @@ extern pte_t *lookup_address(unsigned long address);
#endif

#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_offset_map(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
+ paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
+ __ptep = __ptep + pte_index(address); \
+ __ptep; \
+})
+#define pte_offset_map_nested(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
+ paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
+ __ptep = __ptep + pte_index(address); \
+ __ptep; \
+})
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
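
For completeness, a hedged usage sketch of how a HIGHPTE caller exercises the rewritten macros. examine_pte() is hypothetical and this is kernel-context code, not a standalone program, but the kmap/hook/kunmap ordering is exactly what the hunk above produces:

/* Hypothetical caller.  pte_offset_map() now kmaps the PTE page and
 * then fires paravirt_map_pt_hook(), so the hypervisor can cache the
 * VA->PFN binding before any PTE updates arrive through the mapping. */
static int examine_pte(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_offset_map(pmd, addr);	/* kmap_atomic + hook */
	int present = pte_present(*pte);
	pte_unmap(pte);				/* kunmap_atomic, KM_PTE0 */
	return present;
}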