From 126a5af520eff9b99a0bb1ca4bb4a0b7973f7d5a Mon Sep 17 00:00:00 2001
From: Cornelia Huck
Date: Wed, 3 Jul 2013 16:30:53 +0200
Subject: KVM: kvm-io: support cookies

Add new functions kvm_io_bus_{read,write}_cookie() that allow users of
the kvm io infrastructure to use a cookie value to speed up the lookup
of a device on an io bus.

Signed-off-by: Cornelia Huck
Signed-off-by: Paolo Bonzini
Signed-off-by: Gleb Natapov
---
 virt/kvm/kvm_main.c | 108 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 92 insertions(+), 16 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1580dd4ace4..71960fb03c2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2863,13 +2863,33 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
 	return off;
 }
 
+static int __kvm_io_bus_write(struct kvm_io_bus *bus,
+			      struct kvm_io_range *range, const void *val)
+{
+	int idx;
+
+	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
+	if (idx < 0)
+		return -EOPNOTSUPP;
+
+	while (idx < bus->dev_count &&
+	       kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
+		if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
+					range->len, val))
+			return idx;
+		idx++;
+	}
+
+	return -EOPNOTSUPP;
+}
+
 /* kvm_io_bus_write - called under kvm->slots_lock */
 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		     int len, const void *val)
 {
-	int idx;
 	struct kvm_io_bus *bus;
 	struct kvm_io_range range;
+	int r;
 
 	range = (struct kvm_io_range) {
 		.addr = addr,
@@ -2877,14 +2897,52 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	};
 
 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	idx = kvm_io_bus_get_first_dev(bus, addr, len);
+	r = __kvm_io_bus_write(bus, &range, val);
+	return r < 0 ? r : 0;
+}
+
+/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
+int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+			    int len, const void *val, long cookie)
+{
+	struct kvm_io_bus *bus;
+	struct kvm_io_range range;
+
+	range = (struct kvm_io_range) {
+		.addr = addr,
+		.len = len,
+	};
+
+	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+
+	/* First try the device referenced by cookie. */
+	if ((cookie >= 0) && (cookie < bus->dev_count) &&
+	    (kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
+		if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
+					val))
+			return cookie;
+
+	/*
+	 * cookie contained garbage; fall back to search and return the
+	 * correct cookie value.
+	 */
+	return __kvm_io_bus_write(bus, &range, val);
+}
+
+static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
+			     void *val)
+{
+	int idx;
+
+	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
 	if (idx < 0)
 		return -EOPNOTSUPP;
 
 	while (idx < bus->dev_count &&
-	       kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
-		if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
-			return 0;
+	       kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
+		if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
+				       range->len, val))
+			return idx;
 		idx++;
 	}
 
@@ -2895,9 +2953,9 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		    int len, void *val)
 {
-	int idx;
 	struct kvm_io_bus *bus;
 	struct kvm_io_range range;
+	int r;
 
 	range = (struct kvm_io_range) {
 		.addr = addr,
@@ -2905,18 +2963,36 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	};
 
 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-	idx = kvm_io_bus_get_first_dev(bus, addr, len);
-	if (idx < 0)
-		return -EOPNOTSUPP;
+	r = __kvm_io_bus_read(bus, &range, val);
+	return r < 0 ? r : 0;
+}
 
-	while (idx < bus->dev_count &&
-	       kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
-		if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
-			return 0;
-		idx++;
-	}
+/* kvm_io_bus_read_cookie - called under kvm->slots_lock */
+int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+			   int len, void *val, long cookie)
+{
+	struct kvm_io_bus *bus;
+	struct kvm_io_range range;
 
-	return -EOPNOTSUPP;
+	range = (struct kvm_io_range) {
+		.addr = addr,
+		.len = len,
+	};
+
+	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+
+	/* First try the device referenced by cookie. */
+	if ((cookie >= 0) && (cookie < bus->dev_count) &&
+	    (kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
+		if (!kvm_iodevice_read(bus->range[cookie].dev, addr, len,
+				       val))
+			return cookie;
+
+	/*
+	 * cookie contained garbage; fall back to search and return the
+	 * correct cookie value.
+	 */
+	return __kvm_io_bus_read(bus, &range, val);
 }
 
 /* Caller must hold slots_lock. */
--
cgit v1.2.3-70-g09d2

From e59dbe09f8e6fb8f6ee19dc79d1a2f14299e4cd2 Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Thu, 4 Jul 2013 13:40:29 +0900
Subject: KVM: Introduce kvm_arch_memslots_updated()

This is called right after the memslots are updated, i.e. when the
result of update_memslots() gets installed in install_new_memslots().
Since the memslots need to be updated twice when we delete or move a
memslot, kvm_arch_commit_memory_region() does not correspond to this
exactly.

In the following patch, x86 will use this new API to check whether the
mmio generation has reached its maximum value, in which case mmio sptes
need to be flushed out.
Signed-off-by: Takuya Yoshikawa
Acked-by: Alexander Graf
Reviewed-by: Xiao Guangrong
Signed-off-by: Paolo Bonzini
---
 arch/arm/kvm/arm.c         | 4 ++++
 arch/ia64/kvm/kvm-ia64.c   | 4 ++++
 arch/mips/kvm/kvm_mips.c   | 4 ++++
 arch/powerpc/kvm/powerpc.c | 4 ++++
 arch/s390/kvm/kvm-s390.c   | 4 ++++
 arch/x86/kvm/x86.c         | 4 ++++
 include/linux/kvm_host.h   | 1 +
 virt/kvm/kvm_main.c        | 5 ++++-
 8 files changed, 29 insertions(+), 1 deletion(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 741f66a2edb..9c697db2787 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -219,6 +219,10 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5b2dc0d10c8..bdfd8789b37 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1560,6 +1560,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 	return 0;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index dd203e59e6f..a7b044536de 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -208,6 +208,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 	return 0;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6316ee336e8..ae63ae4a1a5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -420,6 +420,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 	return kvmppc_core_create_memslot(slot, npages);
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba694d2ba51..a3d797b689a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1056,6 +1056,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 	return 0;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d21bce50531..9dd8799e87c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7019,6 +7019,10 @@ out_free:
 	return -ENOMEM;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ec590aece36..c11c7686ae5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -503,6 +503,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
+void kvm_arch_memslots_updated(struct kvm *kvm);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 71960fb03c2..a86735d80ee 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -731,7 +731,10 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 	update_memslots(slots, new, kvm->memslots->generation);
 	rcu_assign_pointer(kvm->memslots, slots);
 	synchronize_srcu_expedited(&kvm->srcu);
-	return old_memslots;
+
+	kvm_arch_memslots_updated(kvm);
+
+	return old_memslots;
 }
 
 /*
--
cgit v1.2.3-70-g09d2

From a343c9b7673e2228bc8a9ac65aae42140f6f9977 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 16 Jul 2013 13:03:29 +0200
Subject: KVM: introduce __kvm_io_bus_sort_cmp

kvm_io_bus_sort_cmp is also used directly, not just as a callback for
sort and bsearch. In these cases, it is handy to have a type-safe
variant.

This patch introduces such a variant, __kvm_io_bus_sort_cmp, and uses
it throughout kvm_main.c.

Signed-off-by: Paolo Bonzini
---
 virt/kvm/kvm_main.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a86735d80ee..c6c8bbea174 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2815,11 +2815,9 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 	kfree(bus);
 }
 
-static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+static inline int __kvm_io_bus_sort_cmp(const struct kvm_io_range *r1,
+					const struct kvm_io_range *r2)
 {
-	const struct kvm_io_range *r1 = p1;
-	const struct kvm_io_range *r2 = p2;
-
 	if (r1->addr < r2->addr)
 		return -1;
 	if (r1->addr + r1->len > r2->addr + r2->len)
@@ -2827,6 +2825,11 @@ static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 	return 0;
 }
 
+static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+{
+	return __kvm_io_bus_sort_cmp(p1, p2);
+}
+
 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 				 gpa_t addr, int len)
 {
@@ -2860,7 +2863,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
 
 	off = range - bus->range;
 
-	while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
+	while (off > 0 && __kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
 		off--;
 
 	return off;
@@ -2876,7 +2879,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
 		return -EOPNOTSUPP;
 
 	while (idx < bus->dev_count &&
-	       kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
+	       __kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
 		if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
 					range->len, val))
 			return idx;
@@ -2920,7 +2923,7 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
-	    (kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
+	    (__kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
 		if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
 					val))
 			return cookie;
@@ -2942,7 +2945,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
 		return -EOPNOTSUPP;
 
 	while (idx < bus->dev_count &&
-	       kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
+	       __kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
 		if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
 				       range->len, val))
 			return idx;
@@ -2986,7 +2989,7 @@ int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
-	    (kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
+	    (__kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
 		if (!kvm_iodevice_read(bus->range[cookie].dev, addr, len,
 				       val))
 			return cookie;
--
cgit v1.2.3-70-g09d2

From 24009b0549de563006705b9af8694fc8fc9a5aa1 Mon Sep 17 00:00:00 2001
From: Yann Droneaud
Date: Sat, 24 Aug 2013 22:14:07 +0200
Subject: kvm: use anon_inode_getfd() with O_CLOEXEC flag

KVM uses anon_inode_getfd() to allocate file descriptors as part of
some of its ioctls. But those ioctls lack a flag argument that would
allow userspace to choose options for the newly opened file descriptor.

In such a case it's advised to use O_CLOEXEC by default, so that
userspace can choose, without a race, whether the file descriptor is
going to be inherited across exec().

This patch sets the O_CLOEXEC flag on all file descriptors created
with anon_inode_getfd(), so as not to leak file descriptors across
exec().

Signed-off-by: Yann Droneaud
Link: http://lkml.kernel.org/r/cover.1377372576.git.ydroneaud@opteya.com
Reviewed-by: Paolo Bonzini
Signed-off-by: Gleb Natapov
---
 virt/kvm/kvm_main.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c6c8bbea174..d2836788561 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1896,7 +1896,7 @@ static struct file_operations kvm_vcpu_fops = {
  */
 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 {
-	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
+	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
 }
 
 /*
@@ -2305,7 +2305,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 		return ret;
 	}
 
-	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR);
+	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
 	if (ret < 0) {
 		ops->destroy(dev);
 		return ret;
@@ -2589,7 +2589,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
 		return r;
 	}
 #endif
-	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
 	if (r < 0)
 		kvm_put_kvm(kvm);
--
cgit v1.2.3-70-g09d2

From 11feeb498086a3a5907b8148bdf1786a9b18fc55 Mon Sep 17 00:00:00 2001
From: Andrea Arcangeli
Date: Thu, 25 Jul 2013 03:04:38 +0200
Subject: kvm: optimize away THP checks in kvm_is_mmio_pfn()

The checks on PG_reserved in the page structure on head and tail pages
aren't necessary because split_huge_page wouldn't transfer the
PG_reserved bit from head to tail anyway.
This was a forward-thinking check done for the case where PageReserved
was set on a driver-owned page mapped into userland with something like
remap_pfn_range in a VM_PFNMAP region, but using hugepmds (not possible
right now). It was meant to be very safe, but it's overkill, as it's
unlikely split_huge_page could ever run without the driver noticing and
tearing down the hugepage itself.

And if a driver in the future really wants to map a reserved hugepage
in userland using a huge pmd, it should simply take care of marking all
subpages reserved too, to keep KVM safe. This of course would require
such a hypothetical driver to tear down the huge pmd and split the
hugepage itself, instead of relying on split_huge_page; but that sounds
very reasonable, especially considering that split_huge_page wouldn't
currently transfer the reserved bit anyway.

Signed-off-by: Andrea Arcangeli
Signed-off-by: Gleb Natapov
---
 virt/kvm/kvm_main.c | 24 ++----------------------
 1 file changed, 2 insertions(+), 22 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d2836788561..0fc25aed79a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -102,28 +102,8 @@ static bool largepages_enabled = true;
 
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
-	if (pfn_valid(pfn)) {
-		int reserved;
-		struct page *tail = pfn_to_page(pfn);
-		struct page *head = compound_trans_head(tail);
-		reserved = PageReserved(head);
-		if (head != tail) {
-			/*
-			 * "head" is not a dangling pointer
-			 * (compound_trans_head takes care of that)
-			 * but the hugepage may have been splitted
-			 * from under us (and we may not hold a
-			 * reference count on the head page so it can
-			 * be reused before we run PageReferenced), so
-			 * we've to check PageTail before returning
-			 * what we just read.
-			 */
-			smp_rmb();
-			if (PageTail(tail))
-				return reserved;
-		}
-		return PageReserved(tail);
-	}
+	if (pfn_valid(pfn))
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
--
cgit v1.2.3-70-g09d2

From c21fbff16b5d43d608966a2963fb248bebce257f Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 27 Aug 2013 15:41:41 +0200
Subject: KVM: rename __kvm_io_bus_sort_cmp to kvm_io_bus_cmp

This is the type-safe comparison function, so the double-underscore
prefix is not appropriate.
Signed-off-by: Paolo Bonzini
Signed-off-by: Gleb Natapov
---
 virt/kvm/kvm_main.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0fc25aed79a..bf040c4e02b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2795,8 +2795,8 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 	kfree(bus);
 }
 
-static inline int __kvm_io_bus_sort_cmp(const struct kvm_io_range *r1,
-					const struct kvm_io_range *r2)
+static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
+				 const struct kvm_io_range *r2)
 {
 	if (r1->addr < r2->addr)
 		return -1;
@@ -2807,7 +2807,7 @@ static inline int __kvm_io_bus_sort_cmp(const struct kvm_io_range *r1,
 
 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 {
-	return __kvm_io_bus_sort_cmp(p1, p2);
+	return kvm_io_bus_cmp(p1, p2);
 }
 
 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
@@ -2843,7 +2843,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
 
 	off = range - bus->range;
 
-	while (off > 0 && __kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
+	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
 		off--;
 
 	return off;
@@ -2859,7 +2859,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
 		return -EOPNOTSUPP;
 
 	while (idx < bus->dev_count &&
-	       __kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
+	       kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
 		if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
 					range->len, val))
 			return idx;
@@ -2903,7 +2903,7 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
-	    (__kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
+	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
 		if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
 					val))
 			return cookie;
@@ -2925,7 +2925,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
 		return -EOPNOTSUPP;
 
 	while (idx < bus->dev_count &&
-	       __kvm_io_bus_sort_cmp(range, &bus->range[idx]) == 0) {
+	       kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
 		if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
 				       range->len, val))
 			return idx;
@@ -2969,7 +2969,7 @@ int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
-	    (__kvm_io_bus_sort_cmp(&range, &bus->range[cookie]) == 0))
+	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
 		if (!kvm_iodevice_read(bus->range[cookie].dev, addr, len,
 				       val))
 			return cookie;
--
cgit v1.2.3-70-g09d2
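
Taken together, the cookie patches above suggest the following call
pattern for an emulation path that notifies the same guest address
repeatedly. This is a minimal sketch, not code from any of the commits:
the my_doorbell structure and my_doorbell_notify() helper are
hypothetical, and only the behaviour of kvm_io_bus_write_cookie()
(returning the matched device index on success, or a negative errno
such as -EOPNOTSUPP) comes from the patches themselves.

/*
 * Hypothetical caller of the cookie API introduced above (not part of
 * any commit in this series).  The cached cookie is just the device
 * index returned by the last successful call; a stale or garbage value
 * is harmless, because kvm_io_bus_write_cookie() falls back to the
 * bsearch-based lookup and returns the correct cookie again.
 */
struct my_doorbell {
	gpa_t addr;	/* guest-physical address being written */
	long cookie;	/* last return value, or -1 initially */
};

/* Called under kvm->slots_lock, like kvm_io_bus_write() itself. */
static int my_doorbell_notify(struct kvm *kvm, struct my_doorbell *db,
			      u64 data)
{
	int ret;

	ret = kvm_io_bus_write_cookie(kvm, KVM_MMIO_BUS, db->addr,
				      sizeof(data), &data, db->cookie);
	if (ret < 0)
		return ret;	/* e.g. -EOPNOTSUPP: no device claims the range */

	db->cookie = ret;	/* remember the device index for next time */
	return 0;
}

The fast path costs one kvm_io_bus_cmp() call before falling back to
the full kvm_io_bus_get_first_dev() search, which is why a bogus
initial value such as -1 is acceptable.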