author | Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> | 2010-08-22 19:11:43 +0800
committer | Avi Kivity <avi@redhat.com> | 2010-10-24 10:51:26 +0200
commit | 48987781eb1d1e8ded41f55cd5806615fda92c6e (patch)
tree | fa6db806ffec34588fbcaa434c33afac51f661e6 /virt
parent | 887c08ac191efb103e33e589aacbc2ce1a3f131e (diff)
KVM: MMU: introduce gfn_to_page_many_atomic() function
Introduce this function to get the pages backing a run of consecutive gfns; it reduces
get_user_pages (gup) overhead and will be used by a later patch.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
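
As a reading aid, here is a small worked example of the page-count calculation introduced by this patch (the slot values are invented for illustration and do not come from the patch itself): gfn_to_hva_many() reports, through *nr_pages, how many consecutive pages the memslot still covers starting at the requested gfn, which bounds how many pages gfn_to_page_many_atomic() can pin.

```c
/*
 * Hypothetical illustration of the *nr_pages calculation in
 * gfn_to_hva_many(); the slot values below are made up, not taken
 * from the patch.
 *
 *   slot->base_gfn = 0x100, slot->npages = 16, requested gfn = 0x104
 *
 *   *nr_pages = slot->npages - (gfn - slot->base_gfn)
 *             = 16 - (0x104 - 0x100)
 *             = 12
 *
 * So 12 consecutive pages remain in the slot starting at gfn, and a
 * caller asking gfn_to_page_many_atomic() for more than 12 pages gets
 * 0 back (the "entry < nr_pages" check in the diff below).
 */
```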
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/kvm_main.c | 29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 08bd304f8bc..2eb0b7500a2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -927,15 +927,25 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+				     gfn_t *nr_pages)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
+
+	if (nr_pages)
+		*nr_pages = slot->npages - (gfn - slot->base_gfn);
+
 	return gfn_to_hva_memslot(slot, gfn);
 }
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm, gfn, NULL);
+}
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
@@ -1010,6 +1020,23 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 	return hva_to_pfn(kvm, addr, false);
 }
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages)
+{
+	unsigned long addr;
+	gfn_t entry;
+
+	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	if (kvm_is_error_hva(addr))
+		return -1;
+
+	if (entry < nr_pages)
+		return 0;
+
+	return __get_user_pages_fast(addr, nr_pages, 1, pages);
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
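
For orientation, a sketch of how a caller might use the new function (this is an assumption about later usage, not code from this patch; prefetch_pages() and PREFETCH_NR are hypothetical names): gfn_to_page_many_atomic() returns -1 when the gfn has no valid hva, 0 when the memslot holds fewer than nr_pages consecutive pages starting at gfn, and otherwise the number of pages actually pinned by __get_user_pages_fast(), which may be less than requested.

```c
/*
 * Illustrative caller sketch only; PREFETCH_NR and prefetch_pages()
 * are hypothetical names, not part of this patch.
 */
#define PREFETCH_NR 8

static int prefetch_pages(struct kvm *kvm, gfn_t gfn)
{
	struct page *pages[PREFETCH_NR];
	int i, ret;

	/* Pin up to PREFETCH_NR consecutive guest pages without sleeping. */
	ret = gfn_to_page_many_atomic(kvm, gfn, pages, PREFETCH_NR);
	if (ret <= 0)
		return ret;	/* -1: bad hva; 0: slot too short; caller falls back */

	for (i = 0; i < ret; i++) {
		/* ... consume pages[i] ... */
		kvm_release_page_clean(pages[i]);
	}

	return ret;
}
```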