Move to the common MMU memory cache implementation now that the common
code and MIPS's existing code are semantically compatible.

No functional change intended.
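
For reference, the common cache keeps the same preallocate-then-consume
model as the removed MIPS helpers. The sketch below is illustrative only
and is not part of the patch: the function name is hypothetical, and the
helper signatures are assumed to match the generic kvm_mmu_memory_cache
declarations in the common code. KVM_MMU_CACHE_MIN_PAGES is the existing
MIPS constant from arch/mips/kvm/mmu.c.

    /*
     * Illustrative sketch only.  Assumes the generic helpers
     * kvm_mmu_topup_memory_cache(), kvm_mmu_memory_cache_alloc() and
     * kvm_mmu_free_memory_cache() provided by the common MMU cache code.
     */
    #include <linux/kvm_host.h>

    static int example_topup_and_alloc(struct kvm_vcpu *vcpu)
    {
            struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
            void *new_table;
            int err;

            /* Fill the cache up front; this path may sleep and allocate. */
            err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
            if (err)
                    return err;

            /* Later, e.g. under mmu_lock: pop a preallocated page. */
            new_table = kvm_mmu_memory_cache_alloc(memcache);
            clear_page(new_table);

            /*
             * Any unused objects are released at vCPU teardown via
             * kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache).
             */
            return 0;
    }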

Suggested-by: Christoffer Dall <christoffer.d...@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
 arch/mips/include/asm/kvm_host.h  | 11 ---------
 arch/mips/include/asm/kvm_types.h |  2 ++
 arch/mips/kvm/mmu.c               | 40 ++++---------------------------
 3 files changed, 7 insertions(+), 46 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 363e7a89d173..f49617175f60 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -335,17 +335,6 @@ struct kvm_mips_tlb {
        long tlb_lo[2];
 };
 
-#define KVM_NR_MEM_OBJS     4
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-       int nobjs;
-       void *objects[KVM_NR_MEM_OBJS];
-};
-
 #define KVM_MIPS_AUX_FPU       0x1
 #define KVM_MIPS_AUX_MSA       0x2
 
diff --git a/arch/mips/include/asm/kvm_types.h b/arch/mips/include/asm/kvm_types.h
index 5efeb32a5926..213754d9ef6b 100644
--- a/arch/mips/include/asm/kvm_types.h
+++ b/arch/mips/include/asm/kvm_types.h
@@ -2,4 +2,6 @@
 #ifndef _ASM_MIPS_KVM_TYPES_H
 #define _ASM_MIPS_KVM_TYPES_H
 
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE     4
+
 #endif /* _ASM_MIPS_KVM_TYPES_H */
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 41a4a063a730..d6acd88c0c46 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -25,39 +25,9 @@
 #define KVM_MMU_CACHE_MIN_PAGES 2
 #endif
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
-{
-       void *page;
-
-       if (cache->nobjs >= min)
-               return 0;
-       while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
-               if (!page)
-                       return -ENOMEM;
-               cache->objects[cache->nobjs++] = page;
-       }
-       return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-       while (mc->nobjs)
-               free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-       void *p;
-
-       BUG_ON(!mc || !mc->nobjs);
-       p = mc->objects[--mc->nobjs];
-       return p;
-}
-
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 /**
@@ -151,7 +121,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
 
                if (!cache)
                        return NULL;
-               new_pmd = mmu_memory_cache_alloc(cache);
+               new_pmd = kvm_mmu_memory_cache_alloc(cache);
                pmd_init((unsigned long)new_pmd,
                         (unsigned long)invalid_pte_table);
                pud_populate(NULL, pud, new_pmd);
@@ -162,7 +132,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
 
                if (!cache)
                        return NULL;
-               new_pte = mmu_memory_cache_alloc(cache);
+               new_pte = kvm_mmu_memory_cache_alloc(cache);
                clear_page(new_pte);
                pmd_populate_kernel(NULL, pmd, new_pte);
        }
@@ -709,7 +679,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                goto out;
 
        /* We need a minimum of cached pages ready for page table creation */
-       err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+       err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
        if (err)
                goto out;
 
@@ -793,7 +763,7 @@ static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
        int ret;
 
        /* We need a minimum of cached pages ready for page table creation */
-       ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+       ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
        if (ret)
                return NULL;
 
-- 
2.26.0