author     Linus Torvalds <torvalds@linux-foundation.org>  2011-04-07 11:33:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-04-07 11:33:04 -0700
commit     7bc30c23c8ace3821a6732bfbe7e8f1b0995a63e (patch)
tree       3440c323f331fb2c0b5e84c206b3c73d70826ee6
parent     ccfeef0ff76ebd632ae51bc56700f0072c4f1864 (diff)
parent     bd22f5cfcfe8f68bf43b72daf4530cd7eedc9b7a (diff)
Merge branch 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: move and fix substitue search for missing CPUID entries
  KVM: fix XSAVE bit scanning
  KVM: Enable async page fault processing
  KVM: fix crash on irqfd deassign
-rw-r--r--  arch/x86/kvm/x86.c   | 37
-rw-r--r--  virt/kvm/eventfd.c   |  2
-rw-r--r--  virt/kvm/kvm_main.c  | 23
3 files changed, 50 insertions, 12 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 58f517b5964..934b4c6b0bf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2395,9 +2395,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                 int i;
 
                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                for (i = 1; *nent < maxnent; ++i) {
-                        if (entry[i - 1].eax == 0 && i != 2)
-                                break;
+                for (i = 1; *nent < maxnent && i < 64; ++i) {
+                        if (entry[i].eax == 0)
+                                continue;
                         do_cpuid_1_ent(&entry[i], function, i);
                         entry[i].flags |=
                                 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
@@ -4958,12 +4958,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                         best = e;
                         break;
                 }
-                /*
-                 * Both basic or both extended?
-                 */
-                if (((e->function ^ function) & 0x80000000) == 0)
-                        if (!best || e->function > best->function)
-                                best = e;
         }
         return best;
 }
@@ -4983,6 +4977,27 @@ not_found:
         return 36;
 }
 
+/*
+ * If no match is found, check whether we exceed the vCPU's limit
+ * and return the content of the highest valid _standard_ leaf instead.
+ * This is to satisfy the CPUID specification.
+ */
+static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
+                                                  u32 function, u32 index)
+{
+        struct kvm_cpuid_entry2 *maxlevel;
+
+        maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+        if (!maxlevel || maxlevel->eax >= function)
+                return NULL;
+        if (function & 0x80000000) {
+                maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
+                if (!maxlevel)
+                        return NULL;
+        }
+        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+}
+
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
         u32 function, index;
@@ -4995,6 +5010,10 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
         best = kvm_find_cpuid_entry(vcpu, function, index);
+
+        if (!best)
+                best = check_cpuid_limit(vcpu, function, index);
+
         if (best) {
                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
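[Aside, not part of the merge: the check_cpuid_limit() helper added above emulates the behavior the CPUID specification requires — when a guest asks for a leaf above the limit reported in leaf 0 (or 0x80000000 for extended leaves), real processors hand back the contents of the highest standard leaf rather than zeros. A minimal user-space sketch of that hardware behavior, assuming an x86 host, GCC's <cpuid.h>, and an invented file name, so you can compare against what the guest now sees:]

/* cpuid_limit.c -- build with: gcc -O2 -pthread -o cpuid_limit cpuid_limit.c */
#include <stdio.h>
#include <cpuid.h>        /* GCC's __cpuid() macro */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int max_leaf;

        /* Leaf 0: EAX reports the highest supported standard leaf. */
        __cpuid(0, eax, ebx, ecx, edx);
        max_leaf = eax;
        printf("highest standard leaf: 0x%x\n", max_leaf);

        /* Ask for a leaf well past the limit.  On typical Intel/AMD parts the
         * processor returns the data of the highest standard leaf instead of
         * zeros -- the fallback check_cpuid_limit() now provides to guests. */
        __cpuid(max_leaf + 0x100, eax, ebx, ecx, edx);
        printf("out-of-range leaf -> eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x\n",
               eax, ebx, ecx, edx);
        return 0;
}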
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 36d8092dbb3..73358d256fa 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -90,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
          * We know no new events will be scheduled at this point, so block
          * until all previously outstanding events have completed
          */
-        flush_work(&irqfd->inject);
+        flush_work_sync(&irqfd->inject);
 
         /*
          * It is now safe to release the object's resources
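[Aside, not part of the merge: the one-line eventfd.c change is the "fix crash on irqfd deassign" patch — the shutdown path must genuinely wait for any outstanding injection work before the irqfd's resources are released. A loose user-space analogue of that ordering concern, using pthreads rather than the kernel workqueue API, with invented names and no attempt to model the exact flush_work()/flush_work_sync() semantics:]

/* irqfd_shutdown_analogue.c -- build with: gcc -O2 -pthread -o demo irqfd_shutdown_analogue.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct irq_object {
        pthread_t worker;   /* stands in for the deferred injection work item */
        int pending;        /* stands in for a queued injection              */
};

static void *inject(void *arg)
{
        struct irq_object *obj = arg;
        /* ... deliver the interrupt, dereferencing *obj ... */
        obj->pending = 0;
        return NULL;
}

int main(void)
{
        struct irq_object *obj = calloc(1, sizeof(*obj));

        obj->pending = 1;
        pthread_create(&obj->worker, NULL, inject, obj);

        /* Shutdown path: wait for the worker to finish *before* freeing,
         * otherwise inject() could touch freed memory -- the class of crash
         * the kernel-side flush change is meant to prevent. */
        pthread_join(obj->worker, NULL);
        free(obj);
        printf("shut down cleanly\n");
        return 0;
}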
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 556e3efe532..6330653480e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1037,6 +1037,17 @@ static pfn_t get_fault_pfn(void)
         return fault_pfn;
 }
 
+int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+        unsigned long start, int write, struct page **page)
+{
+        int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+
+        if (write)
+                flags |= FOLL_WRITE;
+
+        return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+}
+
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
         int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
@@ -1070,7 +1081,14 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
         if (writable)
                 *writable = write_fault;
 
-        npages = get_user_pages_fast(addr, 1, write_fault, page);
+        if (async) {
+                down_read(&current->mm->mmap_sem);
+                npages = get_user_page_nowait(current, current->mm,
+                                        addr, write_fault, page);
+                up_read(&current->mm->mmap_sem);
+        } else
+                npages = get_user_pages_fast(addr, 1, write_fault,
+                                        page);
 
         /* map read fault as writable if possible */
         if (unlikely(!write_fault) && npages == 1) {
@@ -1093,7 +1111,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
                 return get_fault_pfn();
 
         down_read(&current->mm->mmap_sem);
-        if (check_user_page_hwpoison(addr)) {
+        if (npages == -EHWPOISON ||
+                (!async && check_user_page_hwpoison(addr))) {
                 up_read(&current->mm->mmap_sem);
                 get_page(hwpoison_page);
                 return page_to_pfn(hwpoison_page);