Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/assigned-dev.c | 36
-rw-r--r--  virt/kvm/eventfd.c      |  8
-rw-r--r--  virt/kvm/iommu.c        | 10
-rw-r--r--  virt/kvm/irq_comm.c     | 83
-rw-r--r--  virt/kvm/kvm_main.c     | 57
5 files changed, 123 insertions(+), 71 deletions(-)
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 23a41a9f8db..3642239252b 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -105,6 +105,15 @@ static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
 }
 
 #ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
+{
+        struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+        int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
+                                       assigned_dev->irq_source_id,
+                                       assigned_dev->guest_irq, 1);
+        return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
 static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
 {
         struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
@@ -117,6 +126,23 @@ static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
 #endif
 
 #ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
+{
+        struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+        int index = find_index_from_host_irq(assigned_dev, irq);
+        u32 vector;
+        int ret = 0;
+
+        if (index >= 0) {
+                vector = assigned_dev->guest_msix_entries[index].vector;
+                ret = kvm_set_irq_inatomic(assigned_dev->kvm,
+                                           assigned_dev->irq_source_id,
+                                           vector, 1);
+        }
+
+        return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
 static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
 {
         struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
@@ -334,11 +360,6 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 }
 
 #ifdef __KVM_HAVE_MSI
-static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
-{
-        return IRQ_WAKE_THREAD;
-}
-
 static int assigned_device_enable_host_msi(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
 {
@@ -363,11 +384,6 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 #endif
 
 #ifdef __KVM_HAVE_MSIX
-static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
-{
-        return IRQ_WAKE_THREAD;
-}
-
 static int assigned_device_enable_host_msix(struct kvm *kvm,
                                             struct kvm_assigned_dev_kernel *dev)
 {
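The new kvm_assigned_dev_msi()/kvm_assigned_dev_msix() primary handlers replace stubs that unconditionally returned IRQ_WAKE_THREAD: delivery is now attempted entirely in hard-IRQ context, and the threaded handler only runs when atomic delivery fails. A minimal sketch of how such a handler pair is wired up, assuming the request_threaded_irq() registration implied by the assigned_device_enable_host_msi() hunk context above (example_enable_host_msi is a hypothetical name, not from this diff):

#include <linux/interrupt.h>

/*
 * Sketch: the primary handler runs in hard-IRQ context; the threaded
 * handler runs in process context, but only when the primary one
 * returns IRQ_WAKE_THREAD (i.e. kvm_set_irq_inatomic() said
 * -EWOULDBLOCK).
 */
static int example_enable_host_msi(struct kvm_assigned_dev_kernel *dev)
{
        return request_threaded_irq(dev->host_irq,
                                    kvm_assigned_dev_msi,        /* hard-IRQ half */
                                    kvm_assigned_dev_thread_msi, /* threaded half */
                                    0, dev->irq_name, dev);
}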
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 9718e98d6d2..b6eea5cc7b3 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -35,6 +35,7 @@
 
 #include "iodev.h"
 
+#ifdef __KVM_HAVE_IOAPIC
 /*
  * --------------------------------------------------------------------
  * irqfd: Allows an fd to be used to inject an interrupt to the guest
@@ -332,7 +333,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
                 mutex_lock(&kvm->irqfds.resampler_lock);
 
                 list_for_each_entry(resampler,
-                                    &kvm->irqfds.resampler_list, list) {
+                                    &kvm->irqfds.resampler_list, link) {
                         if (resampler->notifier.gsi == irqfd->gsi) {
                                 irqfd->resampler = resampler;
                                 break;
@@ -425,17 +426,21 @@ fail:
         kfree(irqfd);
         return ret;
 }
+#endif
 
 void
 kvm_eventfd_init(struct kvm *kvm)
 {
+#ifdef __KVM_HAVE_IOAPIC
         spin_lock_init(&kvm->irqfds.lock);
         INIT_LIST_HEAD(&kvm->irqfds.items);
         INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
         mutex_init(&kvm->irqfds.resampler_lock);
+#endif
         INIT_LIST_HEAD(&kvm->ioeventfds);
 }
 
+#ifdef __KVM_HAVE_IOAPIC
 /*
  * shutdown any irqfd's that match fd+gsi
  */
@@ -555,6 +560,7 @@ static void __exit irqfd_module_exit(void)
 
 module_init(irqfd_module_init);
 module_exit(irqfd_module_exit);
+#endif
 
 /*
  * --------------------------------------------------------------------
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 037cb6730e6..4a340cb2301 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -52,7 +52,7 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
         end_gfn = gfn + (size >> PAGE_SHIFT);
         gfn    += 1;
 
-        if (is_error_pfn(pfn))
+        if (is_error_noslot_pfn(pfn))
                 return pfn;
 
         while (gfn < end_gfn)
@@ -106,7 +106,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
                  * important because we unmap and unpin in 4kb steps later.
                  */
                 pfn = kvm_pin_pages(slot, gfn, page_size);
-                if (is_error_pfn(pfn)) {
+                if (is_error_noslot_pfn(pfn)) {
                         gfn += 1;
                         continue;
                 }
@@ -168,11 +168,7 @@ int kvm_assign_device(struct kvm *kvm,
 
         r = iommu_attach_device(domain, &pdev->dev);
         if (r) {
-                printk(KERN_ERR "assign device %x:%x:%x.%x failed",
-                        pci_domain_nr(pdev->bus),
-                        pdev->bus->number,
-                        PCI_SLOT(pdev->devfn),
-                        PCI_FUNC(pdev->devfn));
+                dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
                 return r;
         }
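The is_error_pfn() to is_error_noslot_pfn() conversions here (and in kvm_main.c below) track the split of the old catch-all error pfn into genuine errors and the new KVM_PFN_NOSLOT value returned when no memslot covers a gfn. A sketch of the encoding, roughly as it looks in include/linux/kvm_host.h for this series (the exact constants are recalled, not taken from this diff):

typedef unsigned long long pfn_t;       /* stand-in for the kernel's pfn_t */

/*
 * Error pfns live at the top of the pfn space: bit 63 marks a hard
 * error, bit 62 alone marks "no memslot", and the two-bit mask
 * catches both kinds at once.
 */
#define KVM_PFN_ERR_MASK        (0x1ULL << 63)
#define KVM_PFN_ERR_NOSLOT_MASK (0x3ULL << 62)
#define KVM_PFN_NOSLOT          (0x1ULL << 62)

static inline bool is_error_pfn(pfn_t pfn)        /* hard errors only */
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

static inline bool is_error_noslot_pfn(pfn_t pfn) /* errors or no slot */
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

Callers such as kvm_pin_pages() above want to bail out in both cases, hence the wider check.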
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 2eb58af7ee9..656fa455e15 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -102,6 +102,23 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
         return r;
 }
 
+static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+                                   struct kvm_lapic_irq *irq)
+{
+        trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+
+        irq->dest_id = (e->msi.address_lo &
+                        MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+        irq->vector = (e->msi.data &
+                        MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+        irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+        irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
+        irq->delivery_mode = e->msi.data & 0x700;
+        irq->level = 1;
+        irq->shorthand = 0;
+        /* TODO Deal with RH bit of MSI message address */
+}
+
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                 struct kvm *kvm, int irq_source_id, int level)
 {
@@ -110,22 +127,26 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
         if (!level)
                 return -1;
 
-        trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+        kvm_set_msi_irq(e, &irq);
 
-        irq.dest_id = (e->msi.address_lo &
-                        MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
-        irq.vector = (e->msi.data &
-                        MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
-        irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
-        irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
-        irq.delivery_mode = e->msi.data & 0x700;
-        irq.level = 1;
-        irq.shorthand = 0;
-
-        /* TODO Deal with RH bit of MSI message address */
         return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
+static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
+                                struct kvm *kvm)
+{
+        struct kvm_lapic_irq irq;
+        int r;
+
+        kvm_set_msi_irq(e, &irq);
+
+        if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r))
+                return r;
+        else
+                return -EWOULDBLOCK;
+}
+
+
 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
         struct kvm_kernel_irq_routing_entry route;
@@ -178,6 +199,44 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
         return ret;
 }
 
+/*
+ * Deliver an IRQ in an atomic context if we can, or return a failure,
+ * user can retry in a process context.
+ * Return value:
+ *  -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
+ *  Other values - No need to retry.
+ */
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+{
+        struct kvm_kernel_irq_routing_entry *e;
+        int ret = -EINVAL;
+        struct kvm_irq_routing_table *irq_rt;
+        struct hlist_node *n;
+
+        trace_kvm_set_irq(irq, level, irq_source_id);
+
+        /*
+         * Injection into either PIC or IOAPIC might need to scan all CPUs,
+         * which would need to be retried from thread context;  when same GSI
+         * is connected to both PIC and IOAPIC, we'd have to report a
+         * partial failure here.
+         * Since there's no easy way to do this, we only support injecting MSI
+         * which is limited to 1:1 GSI mapping.
+         */
+        rcu_read_lock();
+        irq_rt = rcu_dereference(kvm->irq_routing);
+        if (irq < irq_rt->nr_rt_entries)
+                hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
+                        if (likely(e->type == KVM_IRQ_ROUTING_MSI))
+                                ret = kvm_set_msi_inatomic(e, kvm);
+                        else
+                                ret = -EWOULDBLOCK;
+                        break;
+                }
+        rcu_read_unlock();
+        return ret;
+}
+
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
         struct kvm_irq_ack_notifier *kian;
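kvm_set_msi_irq() factors the x86 MSI address/data decoding out of kvm_set_msi() so that the new kvm_set_msi_inatomic() can reuse it. A worked example of the decoding, using assumed register values (not taken from this diff):

/*
 * Assume a guest programmed:  address_lo = 0xfee02000, data = 0x0031.
 *
 *   dest_id       = (0xfee02000 & MSI_ADDR_DEST_ID_MASK)
 *                       >> MSI_ADDR_DEST_ID_SHIFT            = 0x02
 *   dest_mode     = address_lo bit MSI_ADDR_DEST_MODE_SHIFT  = 0 (physical)
 *   vector        = 0x0031 & MSI_DATA_VECTOR_MASK            = 0x31
 *   delivery_mode = 0x0031 & 0x700                           = 0 (fixed)
 *   trig_mode     = data bit MSI_DATA_TRIGGER_SHIFT          = 0 (edge)
 *
 * i.e. an edge-triggered, fixed-delivery interrupt on vector 0x31 for
 * the APIC with physical ID 2. A unicast case like this is what
 * kvm_irq_delivery_to_apic_fast() can handle without scanning all
 * VCPUs, so the hard-IRQ handlers in assigned-dev.c never need to
 * wake their threaded halves for it.
 */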
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index be70035fd42..1cd693a76a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -212,6 +212,11 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
         make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
+void kvm_make_mclock_inprogress_request(struct kvm *kvm)
+{
+        make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
         struct page *page;
@@ -709,8 +714,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         int r;
         gfn_t base_gfn;
         unsigned long npages;
-        unsigned long i;
-        struct kvm_memory_slot *memslot;
+        struct kvm_memory_slot *memslot, *slot;
         struct kvm_memory_slot old, new;
         struct kvm_memslots *slots, *old_memslots;
 
@@ -761,13 +765,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
         /* Check for overlaps */
         r = -EEXIST;
-        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-                struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
-
-                if (s == memslot || !s->npages)
+        kvm_for_each_memslot(slot, kvm->memslots) {
+                if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
                         continue;
-                if (!((base_gfn + npages <= s->base_gfn) ||
-                      (base_gfn >= s->base_gfn + s->npages)))
+                if (!((base_gfn + npages <= slot->base_gfn) ||
+                      (base_gfn >= slot->base_gfn + slot->npages)))
                         goto out_free;
         }
 
@@ -1208,7 +1210,7 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
                 return KVM_PFN_ERR_RO_FAULT;
 
         if (kvm_is_error_hva(addr))
-                return KVM_PFN_ERR_BAD;
+                return KVM_PFN_NOSLOT;
 
         /* Do not map writable pfn in the readonly memslot. */
         if (writable && memslot_is_readonly(slot)) {
@@ -1290,7 +1292,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-        if (is_error_pfn(pfn))
+        if (is_error_noslot_pfn(pfn))
                 return KVM_ERR_PTR_BAD_PAGE;
 
         if (kvm_is_mmio_pfn(pfn)) {
@@ -1322,7 +1324,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-        if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+        if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
                 put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1848,6 +1850,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
         atomic_inc(&kvm->online_vcpus);
 
         mutex_unlock(&kvm->lock);
+        kvm_arch_vcpu_postcreate(vcpu);
         return r;
 
 unlock_vcpu_destroy:
@@ -1929,10 +1932,6 @@ out_free1:
                         goto out;
                 }
                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
-                if (r)
-                        goto out_free2;
-                r = 0;
-out_free2:
                 kfree(kvm_regs);
                 break;
         }
@@ -1954,12 +1953,10 @@ out_free2:
                 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
                 if (IS_ERR(kvm_sregs)) {
                         r = PTR_ERR(kvm_sregs);
+                        kvm_sregs = NULL;
                         goto out;
                 }
                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
-                if (r)
-                        goto out;
-                r = 0;
                 break;
         }
         case KVM_GET_MP_STATE: {
@@ -1981,9 +1978,6 @@ out_free2:
                 if (copy_from_user(&mp_state, argp, sizeof mp_state))
                         goto out;
                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
-                if (r)
-                        goto out;
-                r = 0;
                 break;
         }
         case KVM_TRANSLATE: {
@@ -2008,9 +2002,6 @@ out_free2:
                 if (copy_from_user(&dbg, argp, sizeof dbg))
                         goto out;
                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
-                if (r)
-                        goto out;
-                r = 0;
                 break;
         }
         case KVM_SET_SIGNAL_MASK: {
@@ -2054,12 +2045,10 @@ out_free2:
                 fpu = memdup_user(argp, sizeof(*fpu));
                 if (IS_ERR(fpu)) {
                         r = PTR_ERR(fpu);
+                        fpu = NULL;
                         goto out;
                 }
                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
-                if (r)
-                        goto out;
-                r = 0;
                 break;
         }
         default:
@@ -2129,8 +2118,6 @@ static long kvm_vm_ioctl(struct file *filp,
         switch (ioctl) {
         case KVM_CREATE_VCPU:
                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
-                if (r < 0)
-                        goto out;
                 break;
         case KVM_SET_USER_MEMORY_REGION: {
                 struct kvm_userspace_memory_region kvm_userspace_mem;
@@ -2141,8 +2128,6 @@ static long kvm_vm_ioctl(struct file *filp,
                         goto out;
 
                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
-                if (r)
-                        goto out;
                 break;
         }
         case KVM_GET_DIRTY_LOG: {
@@ -2152,8 +2137,6 @@ static long kvm_vm_ioctl(struct file *filp,
                 if (copy_from_user(&log, argp, sizeof log))
                         goto out;
                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
-                if (r)
-                        goto out;
                 break;
         }
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
@@ -2163,9 +2146,6 @@ static long kvm_vm_ioctl(struct file *filp,
                 if (copy_from_user(&zone, argp, sizeof zone))
                         goto out;
                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
-                if (r)
-                        goto out;
-                r = 0;
                 break;
         }
         case KVM_UNREGISTER_COALESCED_MMIO: {
@@ -2174,9 +2154,6 @@ static long kvm_vm_ioctl(struct file *filp,
                 if (copy_from_user(&zone, argp, sizeof zone))
                         goto out;
                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
-                if (r)
-                        goto out;
-                r = 0;
                 break;
         }
 #endif
@@ -2285,8 +2262,6 @@ static long kvm_vm_compat_ioctl(struct file *filp,
                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
 
                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
-                if (r)
-                        goto out;
                 break;
         }
         default:
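In the KVM_SET_SREGS and KVM_SET_FPU cases above, a failed memdup_user() now resets the pointer before jumping to the common exit, because the out: label unconditionally kfree()s it and an ERR_PTR must never reach kfree(); the removed "if (r) goto out; r = 0;" sequences were redundant, since break reaches the same exit with r already holding the return value. A minimal sketch of the idiom (example_set_state() and its consume step are hypothetical, not part of this diff):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static long example_set_state(void __user *argp, size_t size)
{
        void *state = NULL;
        long r;

        state = memdup_user(argp, size);
        if (IS_ERR(state)) {
                r = PTR_ERR(state);
                state = NULL;           /* keep the ERR_PTR away from kfree() */
                goto out;
        }

        r = 0;                          /* consume 'state' here */
out:
        kfree(state);                   /* safe: kfree(NULL) is a no-op */
        return r;
}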