From b5a33a75720c03d58d8281a72b45ffd214f00ed7 Mon Sep 17 00:00:00 2001
From: Avi Kivity
Date: Sun, 15 Apr 2007 16:31:09 +0300
Subject: KVM: Use slab caches to allocate mmu data structures

Better leak detection, statistics, memory use, speed -- goodness all
around.

Signed-off-by: Avi Kivity
---
 drivers/kvm/kvm.h      |  3 +++
 drivers/kvm/kvm_main.c |  7 +++++++
 drivers/kvm/mmu.c      | 39 +++++++++++++++++++++++++++++++++++----
 3 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index fceeb840a25..b9c318a9e33 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -433,6 +433,9 @@ extern struct kvm_arch_ops *kvm_arch_ops;
 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
 void kvm_exit_arch(void);
 
+int kvm_mmu_module_init(void);
+void kvm_mmu_module_exit(void);
+
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 0b30631585b..ab4dbd7fa5f 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -3063,6 +3063,10 @@ static __init int kvm_init(void)
 	static struct page *bad_page;
 	int r;
 
+	r = kvm_mmu_module_init();
+	if (r)
+		goto out4;
+
 	r = register_filesystem(&kvm_fs_type);
 	if (r)
 		goto out3;
@@ -3091,6 +3095,8 @@ out:
 out2:
 	unregister_filesystem(&kvm_fs_type);
 out3:
+	kvm_mmu_module_exit();
+out4:
 	return r;
 }
 
@@ -3100,6 +3106,7 @@ static __exit void kvm_exit(void)
 	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
 	mntput(kvmfs_mnt);
 	unregister_filesystem(&kvm_fs_type);
+	kvm_mmu_module_exit();
 }
 
 module_init(kvm_init)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 9ff74805c7d..a368ea8297f 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -159,6 +159,9 @@ struct kvm_rmap_desc {
 	struct kvm_rmap_desc *more;
 };
 
+static struct kmem_cache *pte_chain_cache;
+static struct kmem_cache *rmap_desc_cache;
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->cr0 & CR0_WP_MASK;
@@ -196,14 +199,14 @@ static int is_rmap_pte(u64 pte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  size_t objsize, int min)
+				  struct kmem_cache *base_cache, int min)
 {
 	void *obj;
 
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		obj = kzalloc(objsize, GFP_NOWAIT);
+		obj = kmem_cache_zalloc(base_cache, GFP_NOWAIT);
 		if (!obj)
 			return -ENOMEM;
 		cache->objects[cache->nobjs++] = obj;
@@ -222,11 +225,11 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	int r;
 
 	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-				   sizeof(struct kvm_pte_chain), 4);
+				   pte_chain_cache, 4);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-				   sizeof(struct kvm_rmap_desc), 1);
+				   rmap_desc_cache, 1);
 out:
 	return r;
 }
@@ -1333,6 +1336,34 @@ void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
 	init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_module_exit(void)
+{
+	if (pte_chain_cache)
+		kmem_cache_destroy(pte_chain_cache);
+	if (rmap_desc_cache)
+		kmem_cache_destroy(rmap_desc_cache);
+}
+
+int kvm_mmu_module_init(void)
+{
+	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+					    sizeof(struct kvm_pte_chain),
+					    0, 0, NULL, NULL);
+	if (!pte_chain_cache)
+		goto nomem;
+	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+					    sizeof(struct kvm_rmap_desc),
+					    0, 0, NULL, NULL);
+	if (!rmap_desc_cache)
+		goto nomem;
+
+	return 0;
+
+nomem:
+	kvm_mmu_module_exit();
+	return -ENOMEM;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
-- 
cgit v1.2.3-70-g09d2
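
Note (not part of the patch): the sketch below illustrates the slab-cache
lifecycle the patch adopts -- a named cache created at module init, zeroed
objects drawn from it with kmem_cache_zalloc(), and the cache destroyed at
module exit. The struct example_obj, the example_cache variable, and the
example_* functions are hypothetical names used only for illustration; the
six-argument kmem_cache_create() matches the kernel API of this patch's era.

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	/* Hypothetical object type standing in for kvm_pte_chain/kvm_rmap_desc. */
	struct example_obj {
		u64 key;
		struct example_obj *next;
	};

	static struct kmem_cache *example_cache;

	static int __init example_init(void)
	{
		struct example_obj *obj;

		/* Create a named cache; era-appropriate six-argument form:
		 * (name, size, align, flags, ctor, dtor). */
		example_cache = kmem_cache_create("example_obj",
						  sizeof(struct example_obj),
						  0, 0, NULL, NULL);
		if (!example_cache)
			return -ENOMEM;

		/* Zeroed allocation from the cache, mirroring the patch's
		 * switch from kzalloc() to kmem_cache_zalloc(). */
		obj = kmem_cache_zalloc(example_cache, GFP_KERNEL);
		if (!obj) {
			kmem_cache_destroy(example_cache);
			return -ENOMEM;
		}
		kmem_cache_free(example_cache, obj);
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* Mirrors kvm_mmu_module_exit(): tear the cache down on unload. */
		kmem_cache_destroy(example_cache);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

A dedicated cache gives each object type its own line in /proc/slabinfo,
which is where the leak detection and statistics benefits mentioned in the
commit message come from.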