Zero the pre-allocated cache pages before entering the mmu_lock region, instead of clearing them under the lock when they are handed out. This is safe since the caches are per-vcpu.
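A minimal userspace sketch of the per-vcpu cache pattern this relies on (invented names, not the actual KVM code; calloc() stands in for alloc_page(GFP_KERNEL|__GFP_ZERO)):

#include <stdlib.h>

#define DEMO_CACHE_OBJS 4

struct demo_memory_cache {
	int nobjs;
	void *objects[DEMO_CACHE_OBJS];
};

/* Runs before the lock is taken: may sleep, and zeroing is cheap here. */
static int demo_topup(struct demo_memory_cache *mc, int min, size_t objsize)
{
	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < DEMO_CACHE_OBJS) {
		void *p = calloc(1, objsize);	/* object arrives already zeroed */
		if (!p)
			return -1;
		mc->objects[mc->nobjs++] = p;
	}
	return 0;
}

/* Runs under the lock: constant-time pop, no memset on the hot path. */
static void *demo_alloc(struct demo_memory_cache *mc)
{
	return mc->objects[--mc->nobjs];
}

Because each vcpu owns its cache, nothing can dirty an object between topup and alloc, so zeroing early is equivalent to zeroing at hand-out time.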
Smaller chunks are already zeroed by kmem_cache_zalloc.

~0.90% reduction in system time with AIM7 on RHEL3 / 4-vcpu.

Signed-off-by: Marcelo Tosatti <[email protected]>

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 10bdb2a..823d0cd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -301,7 +301,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		page = alloc_page(GFP_KERNEL);
+		page = alloc_page(GFP_KERNEL|__GFP_ZERO);
 		if (!page)
 			return -ENOMEM;
 		set_page_private(page, 0);
@@ -352,7 +352,6 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 	BUG_ON(!mc->nobjs);
 	p = mc->objects[--mc->nobjs];
-	memset(p, 0, size);
 	return p;
 }
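(Gloss on the first hunk, not part of the patch: __GFP_ZERO asks the page allocator to return an already-zeroed page, conceptually equivalent to

	page = alloc_page(GFP_KERNEL);
	if (page)
		clear_page(page_address(page));	/* done inside the allocator when __GFP_ZERO is set */

so the memset(p, 0, size) that the second hunk removes from the path holding mmu_lock is redundant for these pages.)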
