author     Marcelo Tosatti <mtosatti@redhat.com>  2013-02-20 18:52:02 -0300
committer  Marcelo Tosatti <mtosatti@redhat.com>  2013-02-20 18:52:02 -0300
commit     6b73a96065e89dc9fa75ba4f78b1aa3a3bbd0470 (patch)
tree       170a39f429c7b98ee25ddc6436139a57bb5dd6f9 /arch/x86/kvm
parent     ed55705dd5008b408c48a8459b8b34b01f3de985 (diff)
Revert "KVM: MMU: lazily drop large spte"
This reverts commit caf6900f2d8aaebe404c976753f6813ccd31d95e.

It is causing migration failures, reference
https://bugzilla.kernel.org/show_bug.cgi?id=54061.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c  23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1cda1f33265..4ed3edbe06b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1105,7 +1105,8 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
/*
* Write-protect on the specified @sptep, @pt_protect indicates whether
- * spte write-protection is caused by protecting shadow page table.
+ * spte write-protection is caused by protecting shadow page table.
+ * @flush indicates whether the TLB needs to be flushed.
*
* Note: write protection is different between dirty logging and spte
* protection:
@@ -1114,9 +1115,10 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
* - for spte protection, the spte can be writable only after unsync-ing
* shadow page.
*
- * Return true if tlb need be flushed.
+ * Return true if the spte is dropped.
*/
-static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
+static bool
+spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
{
u64 spte = *sptep;
@@ -1126,11 +1128,17 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+ if (__drop_large_spte(kvm, sptep)) {
+ *flush |= true;
+ return true;
+ }
+
if (pt_protect)
spte &= ~SPTE_MMU_WRITEABLE;
spte = spte & ~PT_WRITABLE_MASK;
- return mmu_spte_update(sptep, spte);
+ *flush |= mmu_spte_update(sptep, spte);
+ return false;
}
static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
@@ -1142,8 +1150,11 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
BUG_ON(!(*sptep & PT_PRESENT_MASK));
+ if (spte_write_protect(kvm, sptep, &flush, pt_protect)) {
+ sptep = rmap_get_first(*rmapp, &iter);
+ continue;
+ }
- flush |= spte_write_protect(kvm, sptep, pt_protect);
sptep = rmap_get_next(&iter);
}
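
The hunk above restores the pre-lazy-drop walk: spte_write_protect() reports through its return value whether it dropped the spte outright (which invalidates the rmap iterator), while the TLB-flush requirement is accumulated separately through the *flush out-parameter. Below is a minimal, self-contained userspace sketch of that restart-on-removal pattern; the names (node, protect_entry, protect_all) and the flag bits are hypothetical stand-ins, not kernel code.

#include <stdbool.h>
#include <stdlib.h>

struct node {
	struct node *next;
	unsigned long spte;
};

#define LARGE_BIT    0x1UL	/* hypothetical "large mapping" flag */
#define WRITABLE_BIT 0x2UL	/* hypothetical "writable" flag      */

/*
 * Mirrors the restored contract: return true if the entry was
 * dropped from the list; *flush accumulates whether a TLB flush
 * became necessary.
 */
static bool protect_entry(struct node **head, struct node *pos, bool *flush)
{
	if (pos->spte & LARGE_BIT) {
		/* eager drop: unlink and free, as __drop_large_spte does */
		struct node **pp = head;

		while (*pp != pos)
			pp = &(*pp)->next;
		*pp = pos->next;
		free(pos);
		*flush = true;
		return true;
	}

	/* in-place update: flush only if the entry actually changed */
	unsigned long old = pos->spte;

	pos->spte &= ~WRITABLE_BIT;
	*flush |= (pos->spte != old);
	return false;
}

static bool protect_all(struct node **head)
{
	bool flush = false;
	struct node *pos = *head;

	while (pos) {
		if (protect_entry(head, pos, &flush)) {
			/* entry gone: iterator is stale, restart from head */
			pos = *head;
			continue;
		}
		pos = pos->next;
	}
	return flush;
}

Restarting from the head is quadratic in the worst case but safe: after a removal the iterator may point at freed memory, so no forward step from it is valid.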
@@ -2581,8 +2592,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
break;
}
- drop_large_spte(vcpu, iterator.sptep);
-
if (!is_shadow_present_pte(*iterator.sptep)) {
u64 base_addr = iterator.addr;
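
This final hunk removes the drop_large_spte() call that the reverted commit had added to __direct_map()'s fault path: with eager dropping restored in spte_write_protect(), the map path no longer encounters a write-protected large spte that still needs tearing down. For contrast, here is a toy model of the two strategies; the state names and helpers are invented for illustration and are not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum spte_state { LARGE_WRITABLE, LARGE_READONLY, DROPPED };

/* eager, as restored by this revert: write-protecting a large spte
 * tears it down immediately */
static enum spte_state protect_eager(enum spte_state s, bool *flush)
{
	if (s == LARGE_WRITABLE) {
		*flush = true;
		return DROPPED;
	}
	return s;
}

/* lazy, as in the reverted commit: write-protecting only clears the
 * writable bit and defers the drop */
static enum spte_state protect_lazy(enum spte_state s, bool *flush)
{
	if (s == LARGE_WRITABLE) {
		*flush = true;
		return LARGE_READONLY;
	}
	return s;
}

/* the deferred half of the lazy scheme: the next write fault is
 * where the reverted __direct_map hunk dropped the large mapping */
static enum spte_state write_fault_lazy(enum spte_state s)
{
	return (s == LARGE_READONLY) ? DROPPED : s;
}

int main(void)
{
	bool flush;

	flush = false;
	printf("eager: %d (flush=%d)\n",
	       protect_eager(LARGE_WRITABLE, &flush), flush);

	flush = false;
	printf("lazy:  %d (flush=%d)\n",
	       write_fault_lazy(protect_lazy(LARGE_WRITABLE, &flush)), flush);
	return 0;
}

Both paths end in the same DROPPED state; the revert trades the lazy scheme's deferred teardown for the simpler eager one after the lazy variant was tied to migration failures.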