author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2011-07-12 03:28:54 +0800
committer Avi Kivity <avi@redhat.com>                    2011-07-24 11:50:34 +0300
commit    fce92dce79dbf5fff39c7ac2fb149729d79b7a39 (patch)
tree      455461b843f5f94356786ea0e21132740458588a /virt/kvm
parent    c37079586f317d7e7f1a70d36f0e5177691c89c2 (diff)
KVM: MMU: filter out the mmio pfn from the fault pfn
If the page fault is caused by mmio, the gfn cannot be found in memslots, and 'bad_pfn' is returned on the gfn_to_hva path, so we can use 'bad_pfn' to identify the mmio page fault. Also, to clarify the meaning of an mmio pfn, we return the fault page instead of the bad page when the gfn is not allowed to be prefetched.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/kvm_main.c | 16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d5ef9ebcaff..56f3c704fd7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -101,8 +101,8 @@ static bool largepages_enabled = true;
static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;
-static struct page *fault_page;
-static pfn_t fault_pfn;
+struct page *fault_page;
+pfn_t fault_pfn;
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
@@ -931,6 +931,18 @@ int is_fault_pfn(pfn_t pfn)
}
EXPORT_SYMBOL_GPL(is_fault_pfn);
+int is_noslot_pfn(pfn_t pfn)
+{
+ return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_noslot_pfn);
+
+int is_invalid_pfn(pfn_t pfn)
+{
+ return pfn == hwpoison_pfn || pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_invalid_pfn);
+
static inline unsigned long bad_hva(void)
{
return PAGE_OFFSET;
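
For reference, a minimal sketch (not part of this patch) of how a caller might use the two new helpers after resolving the pfn for a faulting gfn. It assumes the helper declarations are visible via linux/kvm_host.h; the function name, the enum, and its values are hypothetical and used only for illustration.

#include <linux/kvm_host.h>

/* Illustrative return codes for this sketch only. */
enum fault_pfn_kind {
	FAULT_PFN_MMIO,		/* gfn not backed by any memslot: mmio access */
	FAULT_PFN_INVALID,	/* hwpoisoned page or fault page              */
	FAULT_PFN_RAM,		/* ordinary RAM-backed pfn                    */
};

/*
 * Sketch only, not from the patch: classify the pfn resolved for a
 * faulting gfn using the helpers introduced above.
 */
static enum fault_pfn_kind classify_fault_pfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_noslot_pfn(pfn))
		return FAULT_PFN_MMIO;		/* 'bad_pfn' came back from the lookup */

	if (is_invalid_pfn(pfn))
		return FAULT_PFN_INVALID;	/* hwpoison_pfn or fault_pfn */

	return FAULT_PFN_RAM;
}

A caller on the fault path would emulate the access for FAULT_PFN_MMIO, fail it for FAULT_PFN_INVALID, and map the page normally for FAULT_PFN_RAM.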