Diffstat (limited to 'virt/kvm/coalesced_mmio.c')
-rw-r--r-- | virt/kvm/coalesced_mmio.c | 131
1 file changed, 60 insertions, 71 deletions
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index fc8487564d1..a6ec206f36b 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -24,10 +24,19 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
 static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
 {
-	struct kvm_coalesced_mmio_zone *zone;
+	/* is it in a batchable area ?
+	 * (addr,len) is fully included in
+	 * (zone->addr, zone->size)
+	 */
+
+	return (dev->zone.addr <= addr &&
+		addr + len <= dev->zone.addr + dev->zone.size);
+}
+
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+{
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
-	int i;
 
 	/* Are we able to batch it ? */
 
@@ -37,25 +46,12 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
	 */
 	ring = dev->kvm->coalesced_mmio_ring;
 	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < KVM_MAX_VCPUS) {
+	if (avail == 0) {
 		/* full */
 		return 0;
 	}
 
-	/* is it in a batchable area ? */
-
-	for (i = 0; i < dev->nb_zones; i++) {
-		zone = &dev->zone[i];
-
-		/* (addr,len) is fully included in
-		 * (zone->addr, zone->size)
-		 */
-
-		if (zone->addr <= addr &&
-		    addr + len <= zone->addr + zone->size)
-			return 1;
-	}
-	return 0;
+	return 1;
 }
 
 static int coalesced_mmio_write(struct kvm_io_device *this,
@@ -63,10 +59,16 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
-	spin_lock(&dev->lock);
+	spin_lock(&dev->kvm->ring_lock);
+
+	if (!coalesced_mmio_has_room(dev)) {
+		spin_unlock(&dev->kvm->ring_lock);
+		return -EOPNOTSUPP;
+	}
 
 	/* copy data in first free entry of the ring */
 
@@ -75,7 +77,7 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
-	spin_unlock(&dev->lock);
+	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
 
@@ -83,6 +85,8 @@ static void coalesced_mmio_destructor(struct kvm_io_device *this)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 
+	list_del(&dev->list);
+
 	kfree(dev);
 }
 
@@ -93,7 +97,6 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
 
 int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
-	struct kvm_coalesced_mmio_dev *dev;
 	struct page *page;
 	int ret;
 
@@ -101,31 +104,18 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page)
 		goto out_err;
-	kvm->coalesced_mmio_ring = page_address(page);
 
-	ret = -ENOMEM;
-	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
-	if (!dev)
-		goto out_free_page;
-	spin_lock_init(&dev->lock);
-	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
-	dev->kvm = kvm;
-	kvm->coalesced_mmio_dev = dev;
+	ret = 0;
+	kvm->coalesced_mmio_ring = page_address(page);
 
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret < 0)
-		goto out_free_dev;
+	/*
+	 * We're using this spinlock to sync access to the coalesced ring.
+	 * The list doesn't need its own lock since device registration and
+	 * unregistration should only happen when kvm->slots_lock is held.
+	 */
+	spin_lock_init(&kvm->ring_lock);
+	INIT_LIST_HEAD(&kvm->coalesced_zones);
 
-	return ret;
-
-out_free_dev:
-	kvm->coalesced_mmio_dev = NULL;
-	kfree(dev);
-out_free_page:
-	kvm->coalesced_mmio_ring = NULL;
-	__free_page(page);
 out_err:
 	return ret;
 }
@@ -139,51 +129,50 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
 {
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+	int ret;
+	struct kvm_coalesced_mmio_dev *dev;
 
-	if (dev == NULL)
-		return -ENXIO;
+	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+	dev->kvm = kvm;
+	dev->zone = *zone;
 
 	mutex_lock(&kvm->slots_lock);
-	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-		mutex_unlock(&kvm->slots_lock);
-		return -ENOBUFS;
-	}
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
+				      zone->size, &dev->dev);
+	if (ret < 0)
+		goto out_free_dev;
+	list_add_tail(&dev->list, &kvm->coalesced_zones);
+	mutex_unlock(&kvm->slots_lock);
 
-	dev->zone[dev->nb_zones] = *zone;
-	dev->nb_zones++;
+	return ret;
 
+out_free_dev:
 	mutex_unlock(&kvm->slots_lock);
+
+	kfree(dev);
+
+	if (dev == NULL)
+		return -ENXIO;
+
 	return 0;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
 {
-	int i;
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
-	struct kvm_coalesced_mmio_zone *z;
-
-	if (dev == NULL)
-		return -ENXIO;
+	struct kvm_coalesced_mmio_dev *dev, *tmp;
 
 	mutex_lock(&kvm->slots_lock);
 
-	i = dev->nb_zones;
-	while (i) {
-		z = &dev->zone[i - 1];
-
-		/* unregister all zones
-		 * included in (zone->addr, zone->size)
-		 */
-
-		if (zone->addr <= z->addr &&
-		    z->addr + z->size <= zone->addr + zone->size) {
-			dev->nb_zones--;
-			*z = dev->zone[dev->nb_zones];
+	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+			kvm_iodevice_destructor(&dev->dev);
 		}
-		i--;
-	}
 
 	mutex_unlock(&kvm->slots_lock);
 
 	return 0;
 }
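A note on what moved where: coalesced_mmio_in_range() is now a pure containment test against the device's single zone, and the occupancy test lives in coalesced_mmio_has_room(), which is called with ring_lock held; that is why the old "avail < KVM_MAX_VCPUS" headroom (compensating for a check done outside the lock) can shrink to "avail == 0". The sketch below models both checks as a standalone user-space C program; the struct names, RING_MAX and the explicitly wrap-safe modulo form are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define RING_MAX 8			/* stand-in for KVM_COALESCED_MMIO_MAX */

struct zone { uint64_t addr, size; };
struct ring { unsigned first, last; };	/* consumer bumps first, producer bumps last */

/* (addr, len) must fall entirely inside (z->addr, z->size) */
static int in_range(const struct zone *z, uint64_t addr, unsigned len)
{
	return z->addr <= addr && addr + len <= z->addr + z->size;
}

/* One slot is always kept unused, so first == last means "empty"
 * and a full ring is never indistinguishable from an empty one. */
static int has_room(const struct ring *r)
{
	return (r->first + RING_MAX - r->last - 1) % RING_MAX != 0;
}

int main(void)
{
	struct zone z = { 0x1000, 0x100 };
	struct ring r = { 0, 0 };

	printf("contained: %d\n", in_range(&z, 0x1080, 4));	/* 1 */
	printf("straddles: %d\n", in_range(&z, 0x10fd, 4));	/* 0 */

	while (has_room(&r))		/* accepts RING_MAX - 1 entries */
		r.last = (r.last + 1) % RING_MAX;
	printf("queued: %u\n", (r.last + RING_MAX - r.first) % RING_MAX);	/* 7 */
	return 0;
}

Keeping one slot free is what lets a single pair of cursors distinguish "full" from "empty" without a separate element count.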
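The userspace half of the contract, as a hedged sketch modeled loosely on how QEMU consumes coalesced MMIO: register_zone, drain_ring and handle are invented names, and the standard KVM setup (opening /dev/kvm, KVM_CREATE_VM, mmap of the vcpu fd, locating the ring page via the offset returned by KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO)) is assumed rather than shown; the ioctls and structures themselves are the real uapi.

#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask KVM to batch guest writes to one MMIO region; with this patch,
 * each successful call registers a separate device on the MMIO bus. */
static int register_zone(int vm_fd, __u64 addr, __u32 size)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = addr,
		.size = size,
	};

	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/* Replay and free batched writes. "ring" points at the shared one-page
 * ring; "handle" is whatever applies a write to the device model. */
static void drain_ring(struct kvm_coalesced_mmio_ring *ring,
		       void (*handle)(__u64 addr, const __u8 *data, __u32 len))
{
	/* Mirrors the kernel's KVM_COALESCED_MMIO_MAX: the ring header
	 * and its entries share a single page. */
	__u32 max = (sysconf(_SC_PAGESIZE) - sizeof(*ring)) /
		    sizeof(struct kvm_coalesced_mmio);

	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *ent =
			&ring->coalesced_mmio[ring->first];

		handle(ent->phys_addr, ent->data, ent->len);
		__sync_synchronize();	/* consume the data before freeing the slot */
		ring->first = (ring->first + 1) % max;
	}
}

Unregistering is symmetric: KVM_UNREGISTER_COALESCED_MMIO walks the per-VM list and destroys each device whose zone matches the containment test in the patch.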