KVM: Use slab caches to allocate mmu data structures

Better leak detection, statistics, memory use, speed -- goodness all
around.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Author: Avi Kivity
Date:   2007-04-15 16:31:09 +03:00
commit: b5a33a7572
parent: 417726a3fb

3 changed files with 45 additions and 4 deletions
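
In a nutshell: the pte-chain and rmap-descriptor objects that the MMU preallocates for each vcpu now come from dedicated slab caches rather than the generic heap, so /proc/slabinfo and slabtop can report them per type, and the slab debugging machinery can attribute leaks to their owner. A minimal sketch of the before/after pattern (identifiers taken from the patch below; error handling elided):

	/* before: anonymous objects from the generic heap */
	obj = kzalloc(sizeof(struct kvm_pte_chain), GFP_NOWAIT);

	/* after: a named, fixed-size cache created once at module init... */
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL, NULL);

	/* ...and drawn from on every allocation */
	obj = kmem_cache_zalloc(pte_chain_cache, GFP_NOWAIT);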

--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h

@@ -433,6 +433,9 @@ extern struct kvm_arch_ops *kvm_arch_ops;
 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
 void kvm_exit_arch(void);
 
+int kvm_mmu_module_init(void);
+void kvm_mmu_module_exit(void);
+
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);

--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c

@@ -3063,6 +3063,10 @@ static __init int kvm_init(void)
 	static struct page *bad_page;
 	int r;
 
+	r = kvm_mmu_module_init();
+	if (r)
+		goto out4;
+
 	r = register_filesystem(&kvm_fs_type);
 	if (r)
 		goto out3;
@@ -3091,6 +3095,8 @@ static __init int kvm_init(void)
 out2:
 	unregister_filesystem(&kvm_fs_type);
 out3:
+	kvm_mmu_module_exit();
+out4:
 	return r;
 }
 
@@ -3100,6 +3106,7 @@ static __exit void kvm_exit(void)
 	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
 	mntput(kvmfs_mnt);
 	unregister_filesystem(&kvm_fs_type);
+	kvm_mmu_module_exit();
 }
 
 module_init(kvm_init)
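
Note the unwind ordering in kvm_init(): kvm_mmu_module_init() is now the first setup step, so its cleanup hangs off the out3 label, making the MMU caches the last thing torn down when any later step fails; the new out4 label skips that cleanup when the caches themselves failed to come up. kvm_exit() mirrors the same last-in, first-out order by calling kvm_mmu_module_exit() after everything else.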

--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c

@@ -159,6 +159,9 @@ struct kvm_rmap_desc {
 	struct kvm_rmap_desc *more;
 };
 
+static struct kmem_cache *pte_chain_cache;
+static struct kmem_cache *rmap_desc_cache;
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->cr0 & CR0_WP_MASK;
@@ -196,14 +199,14 @@ static int is_rmap_pte(u64 pte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  size_t objsize, int min)
+				  struct kmem_cache *base_cache, int min)
 {
 	void *obj;
 
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		obj = kzalloc(objsize, GFP_NOWAIT);
+		obj = kmem_cache_zalloc(base_cache, GFP_NOWAIT);
 		if (!obj)
 			return -ENOMEM;
 		cache->objects[cache->nobjs++] = obj;
@@ -222,11 +225,11 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	int r;
 
 	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-				   sizeof(struct kvm_pte_chain), 4);
+				   pte_chain_cache, 4);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-				   sizeof(struct kvm_rmap_desc), 1);
+				   rmap_desc_cache, 1);
 out:
 	return r;
 }
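
For context, the consumer side of these per-vcpu caches is untouched by this patch: fault handling pops preallocated objects off the array that mmu_topup_memory_cache() filled, which is also why the topup path can get away with GFP_NOWAIT. A sketch of that consumer, reconstructed from memory of the mmu.c of this era, so treat the details as approximate:

	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
					    size_t size)
	{
		void *p;

		BUG_ON(!mc->nobjs);		/* topup must have run first */
		p = mc->objects[--mc->nobjs];	/* pop a preallocated object */
		memset(p, 0, size);
		return p;
	}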
@@ -1333,6 +1336,34 @@ void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
 	init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_module_exit(void)
+{
+	if (pte_chain_cache)
+		kmem_cache_destroy(pte_chain_cache);
+	if (rmap_desc_cache)
+		kmem_cache_destroy(rmap_desc_cache);
+}
+
+int kvm_mmu_module_init(void)
+{
+	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+					    sizeof(struct kvm_pte_chain),
+					    0, 0, NULL, NULL);
+	if (!pte_chain_cache)
+		goto nomem;
+	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+					    sizeof(struct kvm_rmap_desc),
+					    0, 0, NULL, NULL);
+	if (!rmap_desc_cache)
+		goto nomem;
+
+	return 0;
+
+nomem:
+	kvm_mmu_module_exit();
+	return -ENOMEM;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
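
Two details here are specific to kernels of this vintage (circa 2.6.21). First, kmem_cache_create() still took constructor and destructor callbacks, which is what the two trailing NULLs are; the signature then was approximately:

	struct kmem_cache *kmem_cache_create(const char *name, size_t size,
			size_t align, unsigned long flags,
			void (*ctor)(void *, struct kmem_cache *, unsigned long),
			void (*dtor)(void *, struct kmem_cache *, unsigned long));

Second, kmem_cache_destroy() did not yet tolerate a NULL argument, hence the guards in kvm_mmu_module_exit(); they also let that function double as the error-unwind path when only the first cache had been created before nomem was reached.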