diff options
author    : Izik Eidus <izike@qumranet.com>    2007-10-16 14:43:46 +0200
committer : Avi Kivity <avi@qumranet.com>      2008-01-30 17:52:54 +0200
commit    : 9647c14c98687d0abf5197e74b9d1448ab6ebb95 (patch)
tree      : 6bd61ace880d9428c38ebe45858793d689ca4f4b /drivers/kvm
parent    : 98348e9507ace5fda95432ff8ca23f13e7f66176 (diff)
KVM: MMU: Keep a reverse mapping of non-writable translations
The current kvm mmu only reverse-maps writable translations. This is used
to write-protect a page in case it becomes a pagetable.
But with swapping support, we need a reverse mapping of read-only pages as
well: when we evict a page, we need to remove any mapping to it, whether
writable or not.
Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--   drivers/kvm/mmu.c | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 14e54e331f5..bbf5eb427dc 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -211,8 +211,8 @@ static int is_io_pte(unsigned long pte)
 
 static int is_rmap_pte(u64 pte)
 {
-	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
-		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
+	return pte != shadow_trap_nonpresent_pte
+		&& pte != shadow_notrap_nonpresent_pte;
 }
 
 static void set_shadow_pte(u64 *sptep, u64 spte)
@@ -488,7 +488,6 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	unsigned long *rmapp;
 	u64 *spte;
-	u64 *prev_spte;
 
 	gfn = unalias_gfn(kvm, gfn);
 	rmapp = gfn_to_rmap(kvm, gfn);
@@ -497,13 +496,11 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 	while (spte) {
 		BUG_ON(!spte);
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
-		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		prev_spte = spte;
-		spte = rmap_next(kvm, rmapp, spte);
-		rmap_remove(kvm, prev_spte);
-		set_shadow_pte(prev_spte, *prev_spte & ~PT_WRITABLE_MASK);
+		if (is_writeble_pte(*spte))
+			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
 		kvm_flush_remote_tlbs(kvm);
+		spte = rmap_next(kvm, rmapp, spte);
 	}
 }
 
@@ -908,14 +905,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 		table = __va(table_addr);
 
 		if (level == 1) {
+			int was_rmapped;
+
 			pte = table[index];
+			was_rmapped = is_rmap_pte(pte);
 			if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
 				return 0;
 			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
 			page_header_update_slot(vcpu->kvm, table, v);
 			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
 								PT_USER_MASK;
-			rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
+			if (!was_rmapped)
+				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
 			return 0;
 		}
 
@@ -1424,10 +1425,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 		pt = page->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
-			if (pt[i] & PT_WRITABLE_MASK) {
-				rmap_remove(kvm, &pt[i]);
+			if (pt[i] & PT_WRITABLE_MASK)
 				pt[i] &= ~PT_WRITABLE_MASK;
-			}
 	}
 }