Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/assigned-dev.c   |   7
-rw-r--r--  virt/kvm/coalesced_mmio.c |   1
-rw-r--r--  virt/kvm/eventfd.c        |   1
-rw-r--r--  virt/kvm/ioapic.c         |   3
-rw-r--r--  virt/kvm/iommu.c          |  12
-rw-r--r--  virt/kvm/irq_comm.c       |  15
-rw-r--r--  virt/kvm/kvm_main.c       | 106
7 files changed, 103 insertions, 42 deletions
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 4d10b1e047f..7c98928b09d 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -1,7 +1,7 @@
 /*
  * Kernel-based Virtual Machine - device assignment support
  *
- * Copyright (C) 2006-9 Red Hat, Inc
+ * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
  *
  * This work is licensed under the terms of the GNU GPL, version 2. See
  * the COPYING file in the top-level directory.
@@ -58,12 +58,10 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
 static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 {
 	struct kvm_assigned_dev_kernel *assigned_dev;
-	struct kvm *kvm;
 	int i;
 
 	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
 				    interrupt_work);
-	kvm = assigned_dev->kvm;
 
 	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
@@ -448,9 +446,6 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 	struct kvm_assigned_dev_kernel *match;
 	unsigned long host_irq_type, guest_irq_type;
 
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
 	if (!irqchip_in_kernel(kvm))
 		return r;
 
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 53850177163..fc8487564d1 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -2,6 +2,7 @@
  * KVM coalesced MMIO
  *
  * Copyright (c) 2008 Bull S.A.S.
+ * Copyright 2009 Red Hat, Inc. and/or its affiliates.
  *
  * Author: Laurent Vivier <Laurent.Vivier@bull.net>
  *
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b81f0ebbaaa..66cf65b510b 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -2,6 +2,7 @@
  * kvm eventfd support - use eventfd objects to signal various KVM events
  *
  * Copyright 2009 Novell. All Rights Reserved.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * Author:
  *	Gregory Haskins <ghaskins@novell.com>
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 3500dee9cf2..0b9df8303dc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2001 MandrakeSoft S.A.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * MandrakeSoft S.A.
  * 43, rue d'Aboukir
@@ -151,7 +152,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		update_handled_vectors(ioapic);
 		mask_after = e->fields.mask;
 		if (mask_before != mask_after)
-			kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
+			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
 		    && ioapic->irr & (1 << index))
 			ioapic_service(ioapic, index);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 96048ee9e39..62a9caf0563 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -16,6 +16,8 @@
  *
  * Copyright (C) 2006-2008 Intel Corporation
  * Copyright IBM Corporation, 2008
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ *
  * Author: Allen M. Kay <allen.m.kay@intel.com>
  * Author: Weidong Han <weidong.han@intel.com>
  * Author: Ben-Ami Yassour <benami@il.ibm.com>
@@ -106,7 +108,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 			      get_order(page_size), flags);
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
-			       "iommu failed to map pfn=%lx\n", pfn);
+			       "iommu failed to map pfn=%llx\n", pfn);
 			goto unmap_pages;
 		}
 
@@ -124,9 +126,10 @@ unmap_pages:
 
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
-	int i, r = 0;
+	int i, idx, r = 0;
 	struct kvm_memslots *slots;
 
+	idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; i++) {
@@ -134,6 +137,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 		if (r)
 			break;
 	}
+	srcu_read_unlock(&kvm->srcu, idx);
 
 	return r;
 }
@@ -283,15 +287,17 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
-	int i;
+	int i, idx;
 	struct kvm_memslots *slots;
 
+	idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; i++) {
 		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
 				    slots->memslots[i].npages);
 	}
+	srcu_read_unlock(&kvm->srcu, idx);
 
 	return 0;
 }
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index a0e88809e45..369e38010ad 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -17,6 +17,7 @@
  * Authors:
  *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
  *
+ * Copyright 2010 Red Hat, Inc. and/or its affilates.
  */
 
 #include <linux/kvm_host.h>
@@ -99,7 +100,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 			if (r < 0)
 				r = 0;
 			r += kvm_apic_set_irq(vcpu, irq);
-		} else {
+		} else if (kvm_lapic_enabled(vcpu)) {
 			if (!lowest)
 				lowest = vcpu;
 			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
@@ -278,15 +279,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 	synchronize_rcu();
 }
 
-void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+			     bool mask)
 {
 	struct kvm_irq_mask_notifier *kimn;
 	struct hlist_node *n;
+	int gsi;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
-		if (kimn->irq == irq)
-			kimn->func(kimn, mask);
+	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+	if (gsi != -1)
+		hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
+			if (kimn->irq == gsi)
+				kimn->func(kimn, mask);
 	rcu_read_unlock();
 }
 
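The irq_comm.c change above reworks kvm_fire_mask_notifiers(): callers now pass an (irqchip, pin) pair, and the function translates the pin to a GSI through the cached irq routing table before matching registered notifiers, which is what lets the ioapic pass KVM_IRQCHIP_IOAPIC plus a pin index. A small standalone model of that translation step, assuming only the chip[irqchip][pin] layout and the -1 "unrouted" sentinel visible in the hunk; everything else here is illustrative, not the kernel API:

/* Standalone model of the pin -> GSI lookup; not the kernel implementation. */
#include <stdio.h>

#define NR_IRQCHIPS  3
#define NR_PINS     24

/*
 * chip[irqchip][pin] holds the GSI routed to that pin, or -1 if the pin is
 * unrouted, mirroring the cached routing table consulted in the hunk above.
 */
static int chip[NR_IRQCHIPS][NR_PINS];

static void fire_mask_notifiers(unsigned irqchip, unsigned pin, int mask)
{
	int gsi = chip[irqchip][pin];

	if (gsi == -1)
		return;	/* unrouted pin: no notifier can match */
	printf("GSI %d %s\n", gsi, mask ? "masked" : "unmasked");
}

int main(void)
{
	for (int c = 0; c < NR_IRQCHIPS; c++)
		for (int p = 0; p < NR_PINS; p++)
			chip[c][p] = -1;

	chip[2][4] = 4;			/* pretend IOAPIC pin 4 routes to GSI 4 */
	fire_mask_notifiers(2, 4, 1);	/* prints "GSI 4 masked" */
	fire_mask_notifiers(2, 5, 1);	/* unrouted: prints nothing */
	return 0;
}

Keying notifiers on the GSI rather than the raw pin keeps existing users working while letting the source chip report masking in its own pin numbering.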
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f032806a212..b78b794c103 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5,6 +5,7 @@
  * machines without emulation or binary translation.
  *
  * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright 2010 Red Hat, Inc. and/or its affilates.
  *
  * Authors:
  *   Avi Kivity   <avi@qumranet.com>
@@ -92,6 +93,12 @@ static bool kvm_rebooting;
 
 static bool largepages_enabled = true;
 
+static struct page *hwpoison_page;
+static pfn_t hwpoison_pfn;
+
+static struct page *fault_page;
+static pfn_t fault_pfn;
+
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -141,7 +148,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	raw_spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_and_set_bit(req, &vcpu->requests))
+		if (kvm_make_check_request(req, vcpu))
 			continue;
 		cpu = vcpu->cpu;
 		if (cpus != NULL && cpu != -1 && cpu != me)
@@ -566,6 +573,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	new = old = *memslot;
 
+	new.id = mem->slot;
 	new.base_gfn = base_gfn;
 	new.npages = npages;
 	new.flags = mem->flags;
@@ -596,7 +604,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
 	if (npages && !new.rmap) {
-		new.rmap = vmalloc(npages * sizeof(struct page *));
+		new.rmap = vmalloc(npages * sizeof(*new.rmap));
 
 		if (!new.rmap)
 			goto out_free;
@@ -621,9 +629,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		if (new.lpage_info[i])
 			continue;
 
-		lpages = 1 + (base_gfn + npages - 1) /
-			     KVM_PAGES_PER_HPAGE(level);
-		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+		lpages = 1 + ((base_gfn + npages - 1)
+			      >> KVM_HPAGE_GFN_SHIFT(level));
+		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
 
 		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
 
@@ -633,9 +641,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memset(new.lpage_info[i], 0,
 		       lpages * sizeof(*new.lpage_info[i]));
 
-		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
 			new.lpage_info[i][0].write_count = 1;
-		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
 			new.lpage_info[i][lpages - 1].write_count = 1;
 		ugfn = new.userspace_addr >> PAGE_SHIFT;
 		/*
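The __kvm_set_memory_region hunks above replace division and modulo by KVM_PAGES_PER_HPAGE(level) with shifts by KVM_HPAGE_GFN_SHIFT(level) and masks by KVM_PAGES_PER_HPAGE(level) - 1. That substitution is an identity only because the number of guest frames per huge page is a power of two. A self-contained check of the identity, using a made-up 512-frame huge page (the names are local to the example):

/* Check that shift/mask and div/mod agree for a power-of-two huge-page size. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long pages_per_hpage = 512;	/* e.g. 2MB pages, 4KB frames */
	const unsigned int gfn_shift = 9;		/* log2(512) */

	for (unsigned long gfn = 0; gfn < 4096; gfn++) {
		unsigned long idx_div = gfn / pages_per_hpage;		/* old form */
		unsigned long off_mod = gfn % pages_per_hpage;
		unsigned long idx_shr = gfn >> gfn_shift;		/* new form */
		unsigned long off_and = gfn & (pages_per_hpage - 1);

		assert(idx_div == idx_shr && off_mod == off_and);
	}
	printf("shift/mask matches div/mod for all tested gfns\n");
	return 0;
}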
@@ -810,16 +818,28 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 
 int is_error_page(struct page *page)
 {
-	return page == bad_page;
+	return page == bad_page || page == hwpoison_page || page == fault_page;
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
 int is_error_pfn(pfn_t pfn)
 {
-	return pfn == bad_pfn;
+	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
 }
 EXPORT_SYMBOL_GPL(is_error_pfn);
 
+int is_hwpoison_pfn(pfn_t pfn)
+{
+	return pfn == hwpoison_pfn;
+}
+EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
+
+int is_fault_pfn(pfn_t pfn)
+{
+	return pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_fault_pfn);
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -831,7 +851,7 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 	struct kvm_memslots *slots = kvm_memslots(kvm);
@@ -845,20 +865,13 @@ struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
-
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
-{
-	gfn = unalias_gfn(kvm, gfn);
-	return gfn_to_memslot_unaliased(kvm, gfn);
-}
+EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 
-	gfn = unalias_gfn_instantiation(kvm, gfn);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
 
@@ -903,7 +916,6 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	struct kvm_memory_slot *memslot = NULL;
 
-	gfn = unalias_gfn(kvm, gfn);
 	for (i = 0; i < slots->nmemslots; ++i) {
 		memslot = &slots->memslots[i];
 
@@ -924,8 +936,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
 
-	gfn = unalias_gfn_instantiation(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 	return gfn_to_hva_memslot(slot, gfn);
@@ -946,13 +957,19 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 		struct vm_area_struct *vma;
 
 		down_read(&current->mm->mmap_sem);
+		if (is_hwpoison_address(addr)) {
+			up_read(&current->mm->mmap_sem);
+			get_page(hwpoison_page);
+			return page_to_pfn(hwpoison_page);
+		}
+
 		vma = find_vma(current->mm, addr);
 
 		if (vma == NULL || addr < vma->vm_start ||
 		    !(vma->vm_flags & VM_PFNMAP)) {
 			up_read(&current->mm->mmap_sem);
-			get_page(bad_page);
-			return page_to_pfn(bad_page);
+			get_page(fault_page);
+			return page_to_pfn(fault_page);
 		}
 
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -1187,8 +1204,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *memslot;
 
-	gfn = unalias_gfn(kvm, gfn);
-	memslot = gfn_to_memslot_unaliased(kvm, gfn);
+	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
@@ -1207,7 +1223,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
 		if (kvm_arch_vcpu_runnable(vcpu)) {
-			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			kvm_make_request(KVM_REQ_UNHALT, vcpu);
 			break;
 		}
 		if (kvm_cpu_has_pending_timer(vcpu))
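Two of the hunks above (make_all_cpus_request and kvm_vcpu_block) stop open-coding set_bit()/test_and_set_bit() on vcpu->requests and call kvm_make_request()/kvm_make_check_request() instead; presumably these are thin helpers that centralize the request-bit manipulation. A freestanding sketch of what such helpers do, using a plain bitmask instead of the kernel's atomic bitops (the struct and names here are illustrative only, not the kernel definitions):

/* Illustrative request-bit helpers; not the kernel implementation. */
#include <stdio.h>

#define REQ_UNHALT 6	/* example request number */

struct vcpu {
	unsigned long requests;		/* one bit per pending request */
};

/* Record a request for the vcpu to handle at its next checkpoint. */
static void make_request(int req, struct vcpu *vcpu)
{
	vcpu->requests |= 1UL << req;
}

/*
 * Record the request and report whether it was already pending, so a caller
 * like make_all_cpus_request() can skip vcpus that were already asked.
 */
static int make_check_request(int req, struct vcpu *vcpu)
{
	int was_pending = !!(vcpu->requests & (1UL << req));

	vcpu->requests |= 1UL << req;
	return was_pending;
}

int main(void)
{
	struct vcpu v = { 0 };

	make_request(REQ_UNHALT, &v);
	printf("already pending: %d\n", make_check_request(REQ_UNHALT, &v));	/* prints 1 */
	return 0;
}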
@@ -1378,6 +1394,18 @@ static long kvm_vcpu_ioctl(struct file *filp,
 
 	if (vcpu->kvm->mm != current->mm)
 		return -EIO;
+
+#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+	/*
+	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
+	 * so vcpu_load() would break it.
+	 */
+	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
+		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
+#endif
+
+
+	vcpu_load(vcpu);
 	switch (ioctl) {
 	case KVM_RUN:
 		r = -EINVAL;
@@ -1520,7 +1548,7 @@ out_free2:
 				goto out;
 			p = &sigset;
 		}
-		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
 		break;
 	}
 	case KVM_GET_FPU: {
@@ -1555,6 +1583,7 @@ out_free2:
 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
 	}
 out:
+	vcpu_put(vcpu);
 	kfree(fpu);
 	kfree(kvm_sregs);
 	return r;
@@ -2197,6 +2226,24 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
 	bad_pfn = page_to_pfn(bad_page);
 
+	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+	if (hwpoison_page == NULL) {
+		r = -ENOMEM;
+		goto out_free_0;
+	}
+
+	hwpoison_pfn = page_to_pfn(hwpoison_page);
+
+	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+	if (fault_page == NULL) {
+		r = -ENOMEM;
+		goto out_free_0;
+	}
+
+	fault_pfn = page_to_pfn(fault_page);
+
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
 		goto out_free_0;
@@ -2269,6 +2316,10 @@ out_free_1:
 out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
+	if (fault_page)
+		__free_page(fault_page);
+	if (hwpoison_page)
+		__free_page(hwpoison_page);
 	__free_page(bad_page);
 out:
 	kvm_arch_exit();
@@ -2290,6 +2341,7 @@ void kvm_exit(void)
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
 	free_cpumask_var(cpus_hardware_enabled);
+	__free_page(hwpoison_page);
 	__free_page(bad_page);
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
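Taken together, the kvm_main.c changes leave three distinct sentinel pages: the existing bad_page, plus hwpoison_page for hardware-poisoned host memory detected in hva_to_pfn() and fault_page for addresses with no usable mapping, with is_error_pfn() matching any of them and is_hwpoison_pfn()/is_fault_pfn() telling the cases apart. A compact model of that dispatch pattern, with plain integers standing in for the pfns and a made-up classify() caller:

/* Model of the three-way sentinel-pfn dispatch; values are made up. */
#include <stdio.h>

enum { BAD_PFN = 100, HWPOISON_PFN = 101, FAULT_PFN = 102 };

static int is_error_pfn(unsigned long pfn)
{
	return pfn == BAD_PFN || pfn == HWPOISON_PFN || pfn == FAULT_PFN;
}

static int is_hwpoison_pfn(unsigned long pfn) { return pfn == HWPOISON_PFN; }
static int is_fault_pfn(unsigned long pfn)    { return pfn == FAULT_PFN; }

/* Illustrative caller: a fault path that wants to tell the cases apart. */
static const char *classify(unsigned long pfn)
{
	if (!is_error_pfn(pfn))
		return "normal page";
	if (is_hwpoison_pfn(pfn))
		return "hardware-poisoned host page";
	if (is_fault_pfn(pfn))
		return "no usable mapping for the address";
	return "generic bad page";
}

int main(void)
{
	printf("%s\n", classify(HWPOISON_PFN));	/* hardware-poisoned host page */
	printf("%s\n", classify(42));		/* normal page */
	return 0;
}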