Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r-- | virt/kvm/kvm_main.c | 57
1 files changed, 16 insertions, 41 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index be70035fd42..1cd693a76a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -212,6 +212,11 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
 	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
+void kvm_make_mclock_inprogress_request(struct kvm *kvm)
+{
+	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	struct page *page;
@@ -709,8 +714,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages;
-	unsigned long i;
-	struct kvm_memory_slot *memslot;
+	struct kvm_memory_slot *memslot, *slot;
 	struct kvm_memory_slot old, new;
 	struct kvm_memslots *slots, *old_memslots;
 
@@ -761,13 +765,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	/* Check for overlaps */
 	r = -EEXIST;
-	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
-
-		if (s == memslot || !s->npages)
+	kvm_for_each_memslot(slot, kvm->memslots) {
+		if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
 			continue;
-		if (!((base_gfn + npages <= s->base_gfn) ||
-		      (base_gfn >= s->base_gfn + s->npages)))
+		if (!((base_gfn + npages <= slot->base_gfn) ||
+		      (base_gfn >= slot->base_gfn + slot->npages)))
 			goto out_free;
 	}
 
@@ -1208,7 +1210,7 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
 		return KVM_PFN_ERR_RO_FAULT;
 
 	if (kvm_is_error_hva(addr))
-		return KVM_PFN_ERR_BAD;
+		return KVM_PFN_NOSLOT;
 
 	/* Do not map writable pfn in the readonly memslot. */
 	if (writable && memslot_is_readonly(slot)) {
@@ -1290,7 +1292,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-	if (is_error_pfn(pfn))
+	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
 	if (kvm_is_mmio_pfn(pfn)) {
@@ -1322,7 +1324,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1848,6 +1850,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	atomic_inc(&kvm->online_vcpus);
 
 	mutex_unlock(&kvm->lock);
+	kvm_arch_vcpu_postcreate(vcpu);
 	return r;
 
 unlock_vcpu_destroy:
@@ -1929,10 +1932,6 @@ out_free1:
 			goto out;
 		}
 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
-		if (r)
-			goto out_free2;
-		r = 0;
-out_free2:
 		kfree(kvm_regs);
 		break;
 	}
@@ -1954,12 +1953,10 @@ out_free2:
 		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
 		if (IS_ERR(kvm_sregs)) {
 			r = PTR_ERR(kvm_sregs);
+			kvm_sregs = NULL;
 			goto out;
 		}
 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
-		if (r)
-			goto out;
-		r = 0;
 		break;
 	}
 	case KVM_GET_MP_STATE: {
@@ -1981,9 +1978,6 @@ out_free2:
 		if (copy_from_user(&mp_state, argp, sizeof mp_state))
 			goto out;
 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
-		if (r)
-			goto out;
-		r = 0;
 		break;
 	}
 	case KVM_TRANSLATE: {
@@ -2008,9 +2002,6 @@ out_free2:
 		if (copy_from_user(&dbg, argp, sizeof dbg))
 			goto out;
 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
-		if (r)
-			goto out;
-		r = 0;
 		break;
 	}
 	case KVM_SET_SIGNAL_MASK: {
@@ -2054,12 +2045,10 @@ out_free2:
 		fpu = memdup_user(argp, sizeof(*fpu));
 		if (IS_ERR(fpu)) {
 			r = PTR_ERR(fpu);
+			fpu = NULL;
 			goto out;
 		}
 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
-		if (r)
-			goto out;
-		r = 0;
 		break;
 	}
 	default:
@@ -2129,8 +2118,6 @@ static long kvm_vm_ioctl(struct file *filp,
 	switch (ioctl) {
 	case KVM_CREATE_VCPU:
 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
-		if (r < 0)
-			goto out;
 		break;
 	case KVM_SET_USER_MEMORY_REGION: {
 		struct kvm_userspace_memory_region kvm_userspace_mem;
@@ -2141,8 +2128,6 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 
 		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
-		if (r)
-			goto out;
 		break;
 	}
 	case KVM_GET_DIRTY_LOG: {
@@ -2152,8 +2137,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		if (copy_from_user(&log, argp, sizeof log))
 			goto out;
 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
-		if (r)
-			goto out;
 		break;
 	}
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
@@ -2163,9 +2146,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		if (copy_from_user(&zone, argp, sizeof zone))
 			goto out;
 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
-		if (r)
-			goto out;
-		r = 0;
 		break;
 	}
 	case KVM_UNREGISTER_COALESCED_MMIO: {
@@ -2174,9 +2154,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		if (copy_from_user(&zone, argp, sizeof zone))
 			goto out;
 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
-		if (r)
-			goto out;
-		r = 0;
 		break;
 	}
 #endif
@@ -2285,8 +2262,6 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
 
 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
-		if (r)
-			goto out;
 		break;
 	}
 	default:
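
Note on the memdup_user() hunks above: the vcpu ioctl paths share a common "out:" label that does kfree() on kvm_sregs/fpu, but a failed memdup_user() returns an ERR_PTR-encoded value, not NULL, so the pointer must be reset before jumping to the cleanup label. The following standalone userspace sketch is not part of the commit; it only illustrates that pattern, with hypothetical helpers dup_or_err() and is_err() standing in for memdup_user()/IS_ERR(), and free() standing in for kfree().

/*
 * Sketch of the "reset pointer to NULL before goto out" pattern.
 * free(NULL) (like kfree(NULL)) is a no-op, but freeing an
 * ERR_PTR-style error cookie would be a bogus free.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for memdup_user(): error encoded in the pointer. */
static void *dup_or_err(const void *src, size_t len, int fail)
{
	if (fail)
		return (void *)(long)-EFAULT;	/* ERR_PTR-style failure */
	void *p = malloc(len);
	if (p)
		memcpy(p, src, len);
	return p;
}

/* Hypothetical stand-in for IS_ERR(): top 4095 values are error codes. */
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static int handle_ioctl(const char *arg, int fail)
{
	char *buf = NULL;
	int r;

	buf = dup_or_err(arg, strlen(arg) + 1, fail);
	if (is_err(buf)) {
		r = (int)(long)buf;
		buf = NULL;	/* the fix: common cleanup must not free an error cookie */
		goto out;
	}
	r = 0;			/* ...act on the copied data here... */
out:
	free(buf);		/* safe: either a real allocation or NULL */
	return r;
}

int main(void)
{
	printf("ok path:   %d\n", handle_ioctl("regs", 0));
	printf("fail path: %d\n", handle_ioctl("regs", 1));
	return 0;
}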