Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/ioapic.c   |  48
-rw-r--r--  virt/kvm/ioapic.h   |   4
-rw-r--r--  virt/kvm/iommu.c    |   4
-rw-r--r--  virt/kvm/irq_comm.c |  25
-rw-r--r--  virt/kvm/kvm_main.c | 201
5 files changed, 207 insertions, 75 deletions
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index cfb7e4d52dc..ce82b940195 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -35,6 +35,7 @@
 #include <linux/hrtimer.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/current.h>
@@ -115,6 +116,42 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
     smp_wmb();
 }
 
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+                    u64 *eoi_exit_bitmap)
+{
+    struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+    union kvm_ioapic_redirect_entry *e;
+    struct kvm_lapic_irq irqe;
+    int index;
+
+    spin_lock(&ioapic->lock);
+    /* traverse ioapic entry to set eoi exit bitmap*/
+    for (index = 0; index < IOAPIC_NUM_PINS; index++) {
+        e = &ioapic->redirtbl[index];
+        if (!e->fields.mask &&
+            (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+             kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
+                 index))) {
+            irqe.dest_id = e->fields.dest_id;
+            irqe.vector = e->fields.vector;
+            irqe.dest_mode = e->fields.dest_mode;
+            irqe.delivery_mode = e->fields.delivery_mode << 8;
+            kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
+        }
+    }
+    spin_unlock(&ioapic->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
+
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+{
+    struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+    if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+        return;
+    kvm_make_update_eoibitmap_request(kvm);
+}
+
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
     unsigned index;
@@ -156,6 +193,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
         if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
             && ioapic->irr & (1 << index))
             ioapic_service(ioapic, index);
+        kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
         break;
     }
 }
@@ -179,15 +217,6 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
     irqe.level = 1;
     irqe.shorthand = 0;
 
-#ifdef CONFIG_X86
-    /* Always delivery PIT interrupt to vcpu 0 */
-    if (irq == 0) {
-        irqe.dest_mode = 0; /* Physical mode. */
-        /* need to read apic_id from apic regiest since
-         * it can be rewritten */
-        irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
-    }
-#endif
     return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
 }
 
@@ -464,6 +493,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
     spin_lock(&ioapic->lock);
     memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
     update_handled_vectors(ioapic);
+    kvm_ioapic_make_eoibitmap_request(kvm);
     spin_unlock(&ioapic->lock);
     return 0;
 }
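Note: kvm_ioapic_calculate_eoi_exitmap() only decides which redirection entries matter (unmasked, and either level-triggered or watched by an ack notifier); the per-vector bitmap work is delegated to the arch-side kvm_calculate_eoi_exitmap(). As a hedged sketch of what that delegation amounts to on x86 — the LAPIC destination-matching of the real helper is elided, and set_eoi_exit_bit is an illustrative name, not a function in this diff:

    /* Sketch only: mark one vector in the 256-bit EOI-exit bitmap
     * (four u64 words).  EOIs for marked vectors must trap to the
     * hypervisor instead of being completed by hardware APIC
     * virtualization, so that ack notifiers and level-triggered
     * IOAPIC emulation still run. */
    static void set_eoi_exit_bit(u64 *eoi_exit_bitmap, u8 vector)
    {
        eoi_exit_bitmap[vector / 64] |= 1ULL << (vector % 64);
    }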
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index a30abfe6ed1..0400a466c50 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -82,5 +82,9 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
         struct kvm_lapic_irq *irq);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+        u64 *eoi_exit_bitmap);
+
 
 #endif
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 4a340cb2301..72a130bc448 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -76,7 +76,9 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
     gfn     = slot->base_gfn;
     end_gfn = gfn + slot->npages;
 
-    flags = IOMMU_READ | IOMMU_WRITE;
+    flags = IOMMU_READ;
+    if (!(slot->flags & KVM_MEM_READONLY))
+        flags |= IOMMU_WRITE;
     if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
         flags |= IOMMU_CACHE;
 
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 656fa455e15..ff6d40e2c06 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -22,6 +22,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 #include <trace/events/kvm.h>
 
 #include <asm/msidef.h>
@@ -237,6 +238,28 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
     return ret;
 }
 
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+    struct kvm_irq_ack_notifier *kian;
+    struct hlist_node *n;
+    int gsi;
+
+    rcu_read_lock();
+    gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+    if (gsi != -1)
+        hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+                     link)
+            if (kian->gsi == gsi) {
+                rcu_read_unlock();
+                return true;
+            }
+
+    rcu_read_unlock();
+
+    return false;
+}
+EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
     struct kvm_irq_ack_notifier *kian;
@@ -261,6 +284,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
     mutex_lock(&kvm->irq_lock);
     hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
     mutex_unlock(&kvm->irq_lock);
+    kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -270,6 +294,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
     hlist_del_init_rcu(&kian->link);
     mutex_unlock(&kvm->irq_lock);
     synchronize_rcu();
+    kvm_ioapic_make_eoibitmap_request(kvm);
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
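Note: kvm_irq_has_notifier() is a new RCU-side query ("does any ack notifier watch this irqchip pin?"), and register/unregister now both kick an EOI-bitmap recomputation so a pin with a notifier keeps trapping EOIs even when edge-triggered. A hedged sketch of a consumer of this interface — struct my_assigned_dev and its callback are illustrative names only; the notifier struct and the register call are the real API exercised by device assignment:

    struct my_assigned_dev {
        struct kvm_irq_ack_notifier ack_notifier;
        /* ... device state ... */
    };

    static void my_dev_irq_acked(struct kvm_irq_ack_notifier *kian)
    {
        struct my_assigned_dev *dev =
            container_of(kian, struct my_assigned_dev, ack_notifier);
        /* guest EOI'd the interrupt: safe to unmask/re-arm here */
    }

    static void my_dev_register(struct kvm *kvm, struct my_assigned_dev *dev,
                    int gsi)
    {
        dev->ack_notifier.gsi = gsi;
        dev->ack_notifier.irq_acked = my_dev_irq_acked;
        kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
    }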
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1cd693a76a5..adc68feb5c5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
     make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
+void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+{
+    make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
     struct page *page;
@@ -474,6 +479,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
     INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
+    BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+
     r = -ENOMEM;
     kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
     if (!kvm->memslots)
@@ -670,7 +677,8 @@ static void sort_memslots(struct kvm_memslots *slots)
         slots->id_to_index[slots->memslots[i].id] = i;
 }
 
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+             u64 last_generation)
 {
     if (new) {
         int id = new->id;
@@ -682,7 +690,7 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
             sort_memslots(slots);
     }
 
-    slots->generation++;
+    slots->generation = last_generation + 1;
 }
 
 static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
@@ -699,6 +707,35 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
     return 0;
 }
 
+static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
+        struct kvm_memslots *slots, struct kvm_memory_slot *new)
+{
+    struct kvm_memslots *old_memslots = kvm->memslots;
+
+    update_memslots(slots, new, kvm->memslots->generation);
+    rcu_assign_pointer(kvm->memslots, slots);
+    synchronize_srcu_expedited(&kvm->srcu);
+    return old_memslots;
+}
+
+/*
+ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
+ * - create a new memory slot
+ * - delete an existing memory slot
+ * - modify an existing memory slot
+ *   -- move it in the guest physical memory space
+ *   -- just change its flags
+ *
+ * Since flags can be changed by some of these operations, the following
+ * differentiation is the best we can do for __kvm_set_memory_region():
+ */
+enum kvm_mr_change {
+    KVM_MR_CREATE,
+    KVM_MR_DELETE,
+    KVM_MR_MOVE,
+    KVM_MR_FLAGS_ONLY,
+};
+
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -709,14 +746,15 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
  */
 int __kvm_set_memory_region(struct kvm *kvm,
                 struct kvm_userspace_memory_region *mem,
-                int user_alloc)
+                bool user_alloc)
 {
     int r;
     gfn_t base_gfn;
     unsigned long npages;
-    struct kvm_memory_slot *memslot, *slot;
+    struct kvm_memory_slot *slot;
     struct kvm_memory_slot old, new;
-    struct kvm_memslots *slots, *old_memslots;
+    struct kvm_memslots *slots = NULL, *old_memslots;
+    enum kvm_mr_change change;
 
     r = check_memory_region_flags(mem);
     if (r)
@@ -740,7 +778,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
     if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
         goto out;
 
-    memslot = id_to_memslot(kvm->memslots, mem->slot);
+    slot = id_to_memslot(kvm->memslots, mem->slot);
     base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
     npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -751,26 +789,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
     if (!npages)
         mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
-    new = old = *memslot;
+    new = old = *slot;
 
     new.id = mem->slot;
     new.base_gfn = base_gfn;
     new.npages = npages;
     new.flags = mem->flags;
 
-    /* Disallow changing a memory slot's size. */
     r = -EINVAL;
-    if (npages && old.npages && npages != old.npages)
-        goto out_free;
+    if (npages) {
+        if (!old.npages)
+            change = KVM_MR_CREATE;
+        else { /* Modify an existing slot. */
+            if ((mem->userspace_addr != old.userspace_addr) ||
+                (npages != old.npages) ||
+                ((new.flags ^ old.flags) & KVM_MEM_READONLY))
+                goto out;
 
-    /* Check for overlaps */
-    r = -EEXIST;
-    kvm_for_each_memslot(slot, kvm->memslots) {
-        if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
-            continue;
-        if (!((base_gfn + npages <= slot->base_gfn) ||
-              (base_gfn >= slot->base_gfn + slot->npages)))
-            goto out_free;
+            if (base_gfn != old.base_gfn)
+                change = KVM_MR_MOVE;
+            else if (new.flags != old.flags)
+                change = KVM_MR_FLAGS_ONLY;
+            else { /* Nothing to change. */
+                r = 0;
+                goto out;
+            }
+        }
+    } else if (old.npages) {
+        change = KVM_MR_DELETE;
+    } else /* Modify a non-existent slot: disallowed. */
+        goto out;
+
+    if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+        /* Check for overlaps */
+        r = -EEXIST;
+        kvm_for_each_memslot(slot, kvm->memslots) {
+            if ((slot->id >= KVM_USER_MEM_SLOTS) ||
+                (slot->id == mem->slot))
+                continue;
+            if (!((base_gfn + npages <= slot->base_gfn) ||
+                  (base_gfn >= slot->base_gfn + slot->npages)))
+                goto out;
+        }
     }
 
     /* Free page dirty bitmap if unneeded */
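Note: the rewritten classification above replaces the old blanket "disallow size changes" check. In table form (derived directly from the hunk above, nothing added):

    old slot   requested npages   what differs                        result
    --------   ----------------   ---------------------------------   -----------------
    absent     > 0                -                                   KVM_MR_CREATE
    present    > 0                base_gfn only                       KVM_MR_MOVE
    present    > 0                flags only (not KVM_MEM_READONLY)   KVM_MR_FLAGS_ONLY
    present    > 0                nothing                             return 0
    present    > 0                userspace_addr, npages,
                                  or the KVM_MEM_READONLY flag        -EINVAL
    present    0                  -                                   KVM_MR_DELETE
    absent     0                  -                                   -EINVAL

Overlap against other user slots is then checked only for CREATE and MOVE, the two cases that introduce a new guest-physical range.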
@@ -778,10 +838,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         new.dirty_bitmap = NULL;
 
     r = -ENOMEM;
-
-    /* Allocate if a slot is being created */
-    if (npages && !old.npages) {
-        new.user_alloc = user_alloc;
+    if (change == KVM_MR_CREATE) {
         new.userspace_addr = mem->userspace_addr;
 
         if (kvm_arch_create_memslot(&new, npages))
@@ -792,12 +849,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
     if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
         if (kvm_create_dirty_bitmap(&new) < 0)
             goto out_free;
-        /* destroy any largepage mappings for dirty tracking */
     }
 
-    if (!npages || base_gfn != old.base_gfn) {
-        struct kvm_memory_slot *slot;
-
+    if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
         r = -ENOMEM;
         slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
                 GFP_KERNEL);
@@ -806,11 +860,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
         slot = id_to_memslot(slots, mem->slot);
         slot->flags |= KVM_MEMSLOT_INVALID;
 
-        update_memslots(slots, NULL);
+        old_memslots = install_new_memslots(kvm, slots, NULL);
 
-        old_memslots = kvm->memslots;
-        rcu_assign_pointer(kvm->memslots, slots);
-        synchronize_srcu_expedited(&kvm->srcu);
+        /* slot was deleted or moved, clear iommu mapping */
+        kvm_iommu_unmap_pages(kvm, &old);
         /* From this point no new shadow pages pointing to a deleted,
          * or moved, memslot will be created.
          *
@@ -819,37 +872,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
          *  - kvm_is_visible_gfn (mmu_check_roots)
          */
         kvm_arch_flush_shadow_memslot(kvm, slot);
-        kfree(old_memslots);
+        slots = old_memslots;
     }
 
     r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
     if (r)
-        goto out_free;
+        goto out_slots;
 
-    /* map/unmap the pages in iommu page table */
-    if (npages) {
-        r = kvm_iommu_map_pages(kvm, &new);
-        if (r)
+    r = -ENOMEM;
+    /*
+     * We can re-use the old_memslots from above, the only difference
+     * from the currently installed memslots is the invalid flag.  This
+     * will get overwritten by update_memslots anyway.
+     */
+    if (!slots) {
+        slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+                GFP_KERNEL);
+        if (!slots)
             goto out_free;
-    } else
-        kvm_iommu_unmap_pages(kvm, &old);
+    }
 
-    r = -ENOMEM;
-    slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
-            GFP_KERNEL);
-    if (!slots)
-        goto out_free;
+    /*
+     * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
+     * un-mapped and re-mapped if their base changes.  Since base change
+     * unmapping is handled above with slot deletion, mapping alone is
+     * needed here.  Anything else the iommu might care about for existing
+     * slots (size changes, userspace addr changes and read-only flag
+     * changes) is disallowed above, so any other attribute changes getting
+     * here can be skipped.
+     */
+    if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+        r = kvm_iommu_map_pages(kvm, &new);
+        if (r)
+            goto out_slots;
+    }
 
     /* actual memory is freed via old in kvm_free_physmem_slot below */
-    if (!npages) {
+    if (change == KVM_MR_DELETE) {
         new.dirty_bitmap = NULL;
         memset(&new.arch, 0, sizeof(new.arch));
     }
 
-    update_memslots(slots, &new);
-    old_memslots = kvm->memslots;
-    rcu_assign_pointer(kvm->memslots, slots);
-    synchronize_srcu_expedited(&kvm->srcu);
+    old_memslots = install_new_memslots(kvm, slots, &new);
 
     kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 
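Note: install_new_memslots() centralizes the publish pattern that was previously open-coded twice: derive the generation from the last installed copy, publish the new array with rcu_assign_pointer(), then wait out readers with synchronize_srcu_expedited(). The reader side it pairs with looks roughly like this (a hedged sketch; in-tree readers go through helpers such as gfn_to_memslot(), and idx/gfn here are illustrative locals):

    int idx;

    idx = srcu_read_lock(&kvm->srcu);      /* pin the current memslots */
    slot = gfn_to_memslot(kvm, gfn);       /* dereferences kvm->memslots */
    /* ... translate gfn, access guest memory through the slot ... */
    srcu_read_unlock(&kvm->srcu, idx);     /* old arrays may be freed now */

Once synchronize_srcu_expedited() returns, no CPU can still hold a reference to the old array, which is why the caller may immediately recycle it (slots = old_memslots) or kfree() it.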
@@ -858,17 +922,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
     return 0;
 
+out_slots:
+    kfree(slots);
 out_free:
     kvm_free_physmem_slot(&new, &old);
 out:
     return r;
-
 }
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 int kvm_set_memory_region(struct kvm *kvm,
               struct kvm_userspace_memory_region *mem,
-              int user_alloc)
+              bool user_alloc)
 {
     int r;
 
@@ -882,9 +947,9 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                    struct kvm_userspace_memory_region *mem,
-                   int user_alloc)
+                   bool user_alloc)
 {
-    if (mem->slot >= KVM_MEMORY_SLOTS)
+    if (mem->slot >= KVM_USER_MEM_SLOTS)
         return -EINVAL;
     return kvm_set_memory_region(kvm, mem, user_alloc);
 }
@@ -898,7 +963,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
     unsigned long any = 0;
 
     r = -EINVAL;
-    if (log->slot >= KVM_MEMORY_SLOTS)
+    if (log->slot >= KVM_USER_MEM_SLOTS)
         goto out;
 
     memslot = id_to_memslot(kvm->memslots, log->slot);
@@ -944,7 +1009,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
     struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
 
-    if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
+    if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
           memslot->flags & KVM_MEMSLOT_INVALID)
         return 0;
 
@@ -1641,6 +1706,7 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
     struct pid *pid;
     struct task_struct *task = NULL;
+    bool ret = false;
 
     rcu_read_lock();
     pid = rcu_dereference(target->pid);
@@ -1648,17 +1714,15 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
         task = get_pid_task(target->pid, PIDTYPE_PID);
     rcu_read_unlock();
     if (!task)
-        return false;
+        return ret;
     if (task->flags & PF_VCPU) {
         put_task_struct(task);
-        return false;
-    }
-    if (yield_to(task, 1)) {
-        put_task_struct(task);
-        return true;
+        return ret;
     }
+    ret = yield_to(task, 1);
     put_task_struct(task);
-    return false;
+
+    return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
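Note: the rewrite of kvm_vcpu_yield_to() folds the yield_to() result straight into a single return path instead of collapsing it through separate true/false branches. The surrounding pid lookup is unchanged and worth spelling out, since it is a common kernel pattern (hedged sketch; vcpu_task is an illustrative name, not a helper in this diff):

    /* Resolve a vCPU's struct pid to its task under RCU.
     * get_pid_task() takes a reference, so the task stays valid
     * after rcu_read_unlock(); the caller must eventually drop it
     * with put_task_struct(), exactly as kvm_vcpu_yield_to() does. */
    static struct task_struct *vcpu_task(struct kvm_vcpu *vcpu)
    {
        struct task_struct *task = NULL;
        struct pid *pid;

        rcu_read_lock();
        pid = rcu_dereference(vcpu->pid);
        if (pid)
            task = get_pid_task(pid, PIDTYPE_PID);
        rcu_read_unlock();
        return task;
    }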
@@ -1699,12 +1763,14 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
     return eligible;
 }
 #endif
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
     struct kvm *kvm = me->kvm;
     struct kvm_vcpu *vcpu;
     int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
     int yielded = 0;
+    int try = 3;
     int pass;
     int i;
 
@@ -1716,7 +1782,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
      * VCPU is holding the lock that we need and will release it.
      * We approximate round-robin by starting at the last boosted VCPU.
      */
-    for (pass = 0; pass < 2 && !yielded; pass++) {
+    for (pass = 0; pass < 2 && !yielded && try; pass++) {
         kvm_for_each_vcpu(i, vcpu, kvm) {
             if (!pass && i <= last_boosted_vcpu) {
                 i = last_boosted_vcpu;
@@ -1729,10 +1795,15 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
                 continue;
             if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
                 continue;
-            if (kvm_vcpu_yield_to(vcpu)) {
+
+            yielded = kvm_vcpu_yield_to(vcpu);
+            if (yielded > 0) {
                 kvm->last_boosted_vcpu = i;
-                yielded = 1;
                 break;
+            } else if (yielded < 0) {
+                try--;
+                if (!try)
+                    break;
             }
         }
     }
@@ -2127,7 +2198,7 @@ static long kvm_vm_ioctl(struct file *filp,
                    sizeof kvm_userspace_mem))
             goto out;
 
-        r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
+        r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
         break;
     }
     case KVM_GET_DIRTY_LOG: {
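Note: after this series a user slot index must be below KVM_USER_MEM_SLOTS, and the same ioctl covers create, move, flags-only change, and delete (memory_size == 0 deletes the slot). A hedged userspace sketch of the path dispatched above — vm_fd and backing are illustrative, assumed to come from KVM_CREATE_VM and a page-aligned mmap() respectively:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    struct kvm_userspace_memory_region region = {
        .slot            = 0,
        .flags           = 0,               /* e.g. KVM_MEM_READONLY */
        .guest_phys_addr = 0x100000,
        .memory_size     = 0x200000,        /* 0 would delete the slot */
        .userspace_addr  = (__u64)backing,  /* page-aligned mmap() area */
    };

    if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
        perror("KVM_SET_USER_MEMORY_REGION");

With the iommu.c change above, a slot created with KVM_MEM_READONLY is also mapped without IOMMU_WRITE, so assigned devices cannot DMA into guest read-only memory.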