Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c  186
1 file changed, 152 insertions(+), 34 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index b10972ed0c9..af866147ff2 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -34,6 +34,8 @@
 #include <linux/highmem.h>
 #include <linux/file.h>
 #include <asm/desc.h>
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
 
 #include "x86_emulate.h"
 #include "segment_descriptor.h"
@@ -41,6 +43,9 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+static DEFINE_SPINLOCK(kvm_lock);
+static LIST_HEAD(vm_list);
+
 struct kvm_arch_ops *kvm_arch_ops;
 struct kvm_stat kvm_stat;
 EXPORT_SYMBOL_GPL(kvm_stat);
@@ -62,7 +67,7 @@ static struct kvm_stats_debugfs_item {
         { "halt_exits", &kvm_stat.halt_exits },
         { "request_irq", &kvm_stat.request_irq_exits },
         { "irq_exits", &kvm_stat.irq_exits },
-        { 0, 0 }
+        { NULL, NULL }
 };
 
 static struct dentry *debugfs_dir;
@@ -205,7 +210,7 @@ static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
         mutex_lock(&vcpu->mutex);
         if (unlikely(!vcpu->vmcs)) {
                 mutex_unlock(&vcpu->mutex);
-                return 0;
+                return NULL;
         }
         return kvm_arch_ops->vcpu_load(vcpu);
 }
@@ -230,9 +235,13 @@ static int kvm_dev_open(struct inode *inode, struct file *filp)
                 struct kvm_vcpu *vcpu = &kvm->vcpus[i];
 
                 mutex_init(&vcpu->mutex);
+                vcpu->cpu = -1;
                 vcpu->kvm = kvm;
                 vcpu->mmu.root_hpa = INVALID_PAGE;
                 INIT_LIST_HEAD(&vcpu->free_pages);
+                spin_lock(&kvm_lock);
+                list_add(&kvm->vm_list, &vm_list);
+                spin_unlock(&kvm_lock);
         }
         filp->private_data = kvm;
         return 0;
@@ -257,9 +266,9 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
         if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                 vfree(free->dirty_bitmap);
 
-        free->phys_mem = 0;
+        free->phys_mem = NULL;
         free->npages = 0;
-        free->dirty_bitmap = 0;
+        free->dirty_bitmap = NULL;
 }
 
 static void kvm_free_physmem(struct kvm *kvm)
@@ -267,12 +276,14 @@ static void kvm_free_physmem(struct kvm *kvm)
         int i;
 
         for (i = 0; i < kvm->nmemslots; ++i)
-                kvm_free_physmem_slot(&kvm->memslots[i], 0);
+                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
 }
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-        vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
+        if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
+                return;
+
         kvm_mmu_destroy(vcpu);
         vcpu_put(vcpu);
         kvm_arch_ops->vcpu_free(vcpu);
@@ -290,6 +301,9 @@ static int kvm_dev_release(struct inode *inode, struct file *filp)
 {
         struct kvm *kvm = filp->private_data;
 
+        spin_lock(&kvm_lock);
+        list_del(&kvm->vm_list);
+        spin_unlock(&kvm_lock);
         kvm_free_vcpus(kvm);
         kvm_free_physmem(kvm);
         kfree(kvm);
@@ -544,7 +558,6 @@
                                            FX_IMAGE_ALIGN);
         vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
 
-        vcpu->cpu = -1;  /* First load will set up TR */
         r = kvm_arch_ops->vcpu_create(vcpu);
         if (r < 0)
                 goto out_free_vcpus;
@@ -640,11 +653,11 @@ raced:
 
         /* Deallocate if slot is being removed */
         if (!npages)
-                new.phys_mem = 0;
+                new.phys_mem = NULL;
 
         /* Free page dirty bitmap if unneeded */
         if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
-                new.dirty_bitmap = 0;
+                new.dirty_bitmap = NULL;
 
         r = -ENOMEM;
 
@@ -799,14 +812,14 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
                     && gfn < memslot->base_gfn + memslot->npages)
                         return memslot;
         }
-        return 0;
+        return NULL;
 }
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
         int i;
-        struct kvm_memory_slot *memslot = 0;
+        struct kvm_memory_slot *memslot = NULL;
         unsigned long rel_gfn;
 
         for (i = 0; i < kvm->nmemslots; ++i) {
@@ -1360,6 +1373,9 @@ static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
         if (!vcpu)
                 return -ENOENT;
 
+        /* re-sync apic's tpr */
+        vcpu->cr8 = kvm_run->cr8;
+
         if (kvm_run->emulated) {
                 kvm_arch_ops->skip_emulated_instruction(vcpu);
                 kvm_run->emulated = 0;
@@ -1778,6 +1794,7 @@ static long kvm_dev_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
         struct kvm *kvm = filp->private_data;
+        void __user *argp = (void __user *)arg;
         int r = -EINVAL;
 
         switch (ioctl) {
@@ -1794,12 +1811,12 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_run kvm_run;
 
                 r = -EFAULT;
-                if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
+                if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
                         goto out;
                 r = kvm_dev_ioctl_run(kvm, &kvm_run);
                 if (r < 0 && r != -EINTR)
                         goto out;
-                if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
+                if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
                         r = -EFAULT;
                         goto out;
                 }
@@ -1809,13 +1826,13 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_regs kvm_regs;
 
                 r = -EFAULT;
-                if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
+                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                         goto out;
                 r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
                 if (r)
                         goto out;
                 r = -EFAULT;
-                if (copy_to_user((void *)arg, &kvm_regs, sizeof kvm_regs))
+                if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
                         goto out;
                 r = 0;
                 break;
@@ -1824,7 +1841,7 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_regs kvm_regs;
 
                 r = -EFAULT;
-                if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
+                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                         goto out;
                 r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
                 if (r)
@@ -1836,13 +1853,13 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_sregs kvm_sregs;
 
                 r = -EFAULT;
-                if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
+                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                         goto out;
                 r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
                 if (r)
                         goto out;
                 r = -EFAULT;
-                if (copy_to_user((void *)arg, &kvm_sregs, sizeof kvm_sregs))
+                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                         goto out;
                 r = 0;
                 break;
@@ -1851,7 +1868,7 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_sregs kvm_sregs;
 
                 r = -EFAULT;
-                if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
+                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                         goto out;
                 r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
                 if (r)
@@ -1863,13 +1880,13 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_translation tr;
 
                 r = -EFAULT;
-                if (copy_from_user(&tr, (void *)arg, sizeof tr))
+                if (copy_from_user(&tr, argp, sizeof tr))
                         goto out;
                 r = kvm_dev_ioctl_translate(kvm, &tr);
                 if (r)
                         goto out;
                 r = -EFAULT;
-                if (copy_to_user((void *)arg, &tr, sizeof tr))
+                if (copy_to_user(argp, &tr, sizeof tr))
                         goto out;
                 r = 0;
                 break;
@@ -1878,7 +1895,7 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_interrupt irq;
 
                 r = -EFAULT;
-                if (copy_from_user(&irq, (void *)arg, sizeof irq))
+                if (copy_from_user(&irq, argp, sizeof irq))
                         goto out;
                 r = kvm_dev_ioctl_interrupt(kvm, &irq);
                 if (r)
@@ -1890,7 +1907,7 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_debug_guest dbg;
 
                 r = -EFAULT;
-                if (copy_from_user(&dbg, (void *)arg, sizeof dbg))
+                if (copy_from_user(&dbg, argp, sizeof dbg))
                         goto out;
                 r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
                 if (r)
@@ -1902,7 +1919,7 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_memory_region kvm_mem;
 
                 r = -EFAULT;
-                if (copy_from_user(&kvm_mem, (void *)arg, sizeof kvm_mem))
+                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
                         goto out;
                 r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
                 if (r)
@@ -1913,7 +1930,7 @@ static long kvm_dev_ioctl(struct file *filp,
                 struct kvm_dirty_log log;
 
                 r = -EFAULT;
-                if (copy_from_user(&log, (void *)arg, sizeof log))
+                if (copy_from_user(&log, argp, sizeof log))
                         goto out;
                 r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
                 if (r)
@@ -1921,13 +1938,13 @@ static long kvm_dev_ioctl(struct file *filp,
                 break;
         }
         case KVM_GET_MSRS:
-                r = msr_io(kvm, (void __user *)arg, get_msr, 1);
+                r = msr_io(kvm, argp, get_msr, 1);
                 break;
         case KVM_SET_MSRS:
-                r = msr_io(kvm, (void __user *)arg, do_set_msr, 0);
+                r = msr_io(kvm, argp, do_set_msr, 0);
                 break;
         case KVM_GET_MSR_INDEX_LIST: {
-                struct kvm_msr_list __user *user_msr_list = (void __user *)arg;
+                struct kvm_msr_list __user *user_msr_list = argp;
                 struct kvm_msr_list msr_list;
                 unsigned n;
 
@@ -2014,7 +2031,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                  * in vmx root mode.
                  */
                 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
-                on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+                on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
         }
         return NOTIFY_OK;
 }
@@ -2024,11 +2041,69 @@ static struct notifier_block kvm_reboot_notifier = {
         .priority = 0,
 };
 
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it.
+ */
+static void decache_vcpus_on_cpu(int cpu)
+{
+        struct kvm *vm;
+        struct kvm_vcpu *vcpu;
+        int i;
+
+        spin_lock(&kvm_lock);
+        list_for_each_entry(vm, &vm_list, vm_list)
+                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+                        vcpu = &vm->vcpus[i];
+                        /*
+                         * If the vcpu is locked, then it is running on some
+                         * other cpu and therefore it is not cached on the
+                         * cpu in question.
+                         *
+                         * If it's not locked, check the last cpu it executed
+                         * on.
+                         */
+                        if (mutex_trylock(&vcpu->mutex)) {
+                                if (vcpu->cpu == cpu) {
+                                        kvm_arch_ops->vcpu_decache(vcpu);
+                                        vcpu->cpu = -1;
+                                }
+                                mutex_unlock(&vcpu->mutex);
+                        }
+                }
+        spin_unlock(&kvm_lock);
+}
+
+static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
+                           void *v)
+{
+        int cpu = (long)v;
+
+        switch (val) {
+        case CPU_DEAD:
+        case CPU_UP_CANCELED:
+                decache_vcpus_on_cpu(cpu);
+                smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
+                                         NULL, 0, 1);
+                break;
+        case CPU_UP_PREPARE:
+                smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
+                                         NULL, 0, 1);
+                break;
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_cpu_notifier = {
+        .notifier_call = kvm_cpu_hotplug,
+        .priority = 20, /* must be > scheduler priority */
+};
+
 static __init void kvm_init_debug(void)
 {
         struct kvm_stats_debugfs_item *p;
 
-        debugfs_dir = debugfs_create_dir("kvm", 0);
+        debugfs_dir = debugfs_create_dir("kvm", NULL);
         for (p = debugfs_entries; p->name; ++p)
                 p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
                                                p->data);
@@ -2043,6 +2118,30 @@ static void kvm_exit_debug(void)
         debugfs_remove(debugfs_dir);
 }
 
+static int kvm_suspend(struct sys_device *dev, pm_message_t state)
+{
+        decache_vcpus_on_cpu(raw_smp_processor_id());
+        on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+        return 0;
+}
+
+static int kvm_resume(struct sys_device *dev)
+{
+        on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1);
+        return 0;
+}
+
+static struct sysdev_class kvm_sysdev_class = {
+        set_kset_name("kvm"),
+        .suspend = kvm_suspend,
+        .resume = kvm_resume,
+};
+
+static struct sys_device kvm_sysdev = {
+        .id = 0,
+        .cls = &kvm_sysdev_class,
+};
+
 hpa_t bad_page_address;
 
 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
@@ -2069,9 +2168,20 @@
         if (r < 0)
                 return r;
 
-        on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1);
+        on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+        r = register_cpu_notifier(&kvm_cpu_notifier);
+        if (r)
+                goto out_free_1;
         register_reboot_notifier(&kvm_reboot_notifier);
 
+        r = sysdev_class_register(&kvm_sysdev_class);
+        if (r)
+                goto out_free_2;
+
+        r = sysdev_register(&kvm_sysdev);
+        if (r)
+                goto out_free_3;
+
         kvm_chardev_ops.owner = module;
 
         r = misc_register(&kvm_dev);
@@ -2083,8 +2193,14 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
         return r;
 
 out_free:
+        sysdev_unregister(&kvm_sysdev);
+out_free_3:
+        sysdev_class_unregister(&kvm_sysdev_class);
+out_free_2:
         unregister_reboot_notifier(&kvm_reboot_notifier);
-        on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+        unregister_cpu_notifier(&kvm_cpu_notifier);
+out_free_1:
+        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
         kvm_arch_ops->hardware_unsetup();
         return r;
 }
@@ -2092,9 +2208,11 @@ out_free:
 void kvm_exit_arch(void)
 {
         misc_deregister(&kvm_dev);
-
+        sysdev_unregister(&kvm_sysdev);
+        sysdev_class_unregister(&kvm_sysdev_class);
         unregister_reboot_notifier(&kvm_reboot_notifier);
-        on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+        unregister_cpu_notifier(&kvm_cpu_notifier);
+        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
         kvm_arch_ops->hardware_unsetup();
         kvm_arch_ops = NULL;
 }
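The decache path added above depends on one locking invariant: a vcpu's mutex is held for as long as the vcpu is executing, so a successful mutex_trylock() proves the vcpu is idle and its vcpu->cpu field can be trusted; a failed trylock proves it is running on some other cpu and so has nothing cached on the cpu being unplugged. The following stand-alone user-space sketch (pthreads) shows the same walk-and-trylock pattern in isolation; all toy_* names are hypothetical stand-ins, not KVM code.

#include <pthread.h>
#include <stdio.h>

#define MAX_VCPUS 4

struct toy_vcpu {
        pthread_mutex_t mutex;  /* held for the whole time the vcpu runs */
        int cpu;                /* last cpu this vcpu ran on, -1 if none */
};

struct toy_vm {
        struct toy_vcpu vcpus[MAX_VCPUS];
        struct toy_vm *next;    /* stand-in for the kernel's vm_list */
};

static struct toy_vm *vm_list;
static pthread_mutex_t vm_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of decache_vcpus_on_cpu(): drop state cached on 'cpu'. */
static void toy_decache_vcpus_on_cpu(int cpu)
{
        struct toy_vm *vm;
        int i;

        pthread_mutex_lock(&vm_list_lock);
        for (vm = vm_list; vm; vm = vm->next)
                for (i = 0; i < MAX_VCPUS; ++i) {
                        struct toy_vcpu *vcpu = &vm->vcpus[i];

                        /*
                         * Trylock failure: the vcpu is running elsewhere,
                         * so nothing of it is cached on 'cpu'.  Trylock
                         * success: it is idle, and its last-cpu field says
                         * where its state lives.
                         */
                        if (pthread_mutex_trylock(&vcpu->mutex) == 0) {
                                if (vcpu->cpu == cpu) {
                                        printf("decached vcpu %d\n", i);
                                        vcpu->cpu = -1;
                                }
                                pthread_mutex_unlock(&vcpu->mutex);
                        }
                }
        pthread_mutex_unlock(&vm_list_lock);
}

int main(void)
{
        static struct toy_vm vm;
        int i;

        for (i = 0; i < MAX_VCPUS; ++i) {
                pthread_mutex_init(&vm.vcpus[i].mutex, NULL);
                vm.vcpus[i].cpu = (i == 1) ? 2 : -1; /* vcpu 1 last ran on cpu 2 */
        }
        vm_list = &vm;
        toy_decache_vcpus_on_cpu(2);    /* pretend cpu 2 is going offline */
        return 0;
}

Built with cc -pthread, this prints that vcpu 1 was decached from cpu 2 and skips any vcpu whose trylock fails, which is the same reasoning the CPU_DEAD/CPU_UP_CANCELED path applies before calling hardware_disable on the outgoing cpu.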