Move to the common MMU memory cache implementation now that the common
code and arm64's existing code are semantically compatible.

No functional change intended.

Suggested-by: Christoffer Dall <christoffer.d...@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
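For reference (reviewer note, not part of the commit message): the common cache
this patch switches to looks roughly like the sketch below; the extra pointer
field between gfp_zero and objects[] is why the positional initializer in
kvm_phys_addr_ioremap() gains a NULL. Treat this as an approximation of the
common code, not its authoritative definition.

	/* include/linux/kvm_types.h, approximate sketch */
	struct kvm_mmu_memory_cache {
		int nobjs;
		gfp_t gfp_zero;
		struct kmem_cache *kmem_cache;	/* optional backing slab cache */
		void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
	};

	/* Common helpers that replace the arm64-local ones removed below: */
	int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
	void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
	void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
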
 arch/arm64/include/asm/kvm_host.h  | 12 -------
 arch/arm64/include/asm/kvm_types.h |  2 ++
 arch/arm64/kvm/mmu.c               | 51 ++++++------------------------
 3 files changed, 12 insertions(+), 53 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2385dede96e0..d221b6b129fd 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -97,18 +97,6 @@ struct kvm_arch {
        bool return_nisv_io_abort_to_user;
 };
 
-#define KVM_NR_MEM_OBJS     40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-       int nobjs;
-       gfp_t gfp_zero;
-       void *objects[KVM_NR_MEM_OBJS];
-};
-
 struct kvm_vcpu_fault_info {
        u32 esr_el2;            /* Hyp Syndrom Register */
        u64 far_el2;            /* Hyp Fault Address Register */
diff --git a/arch/arm64/include/asm/kvm_types.h b/arch/arm64/include/asm/kvm_types.h
index d0987007d581..9a126b9e2d7c 100644
--- a/arch/arm64/include/asm/kvm_types.h
+++ b/arch/arm64/include/asm/kvm_types.h
@@ -2,5 +2,7 @@
 #ifndef _ASM_ARM64_KVM_TYPES_H
 #define _ASM_ARM64_KVM_TYPES_H
 
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
 #endif /* _ASM_ARM64_KVM_TYPES_H */
 
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 688213ef34f0..976405e2fbb2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -124,37 +124,6 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
        put_page(virt_to_page(pudp));
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
-{
-       void *page;
-
-       if (cache->nobjs >= min)
-               return 0;
-       while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT |
-                                              cache->gfp_zero);
-               if (!page)
-                       return -ENOMEM;
-               cache->objects[cache->nobjs++] = page;
-       }
-       return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-       while (mc->nobjs)
-               free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-       void *p;
-
-       BUG_ON(!mc || !mc->nobjs);
-       p = mc->objects[--mc->nobjs];
-       return p;
-}
-
 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
        pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
@@ -1024,7 +993,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
        if (stage2_pgd_none(kvm, *pgd)) {
                if (!cache)
                        return NULL;
-               pud = mmu_memory_cache_alloc(cache);
+               pud = kvm_mmu_memory_cache_alloc(cache);
                stage2_pgd_populate(kvm, pgd, pud);
                get_page(virt_to_page(pgd));
        }
@@ -1045,7 +1014,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
        if (stage2_pud_none(kvm, *pud)) {
                if (!cache)
                        return NULL;
-               pmd = mmu_memory_cache_alloc(cache);
+               pmd = kvm_mmu_memory_cache_alloc(cache);
                stage2_pud_populate(kvm, pud, pmd);
                get_page(virt_to_page(pud));
        }
@@ -1251,7 +1220,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
        if (stage2_pud_none(kvm, *pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
-               pmd = mmu_memory_cache_alloc(cache);
+               pmd = kvm_mmu_memory_cache_alloc(cache);
                stage2_pud_populate(kvm, pud, pmd);
                get_page(virt_to_page(pud));
        }
@@ -1276,7 +1245,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
-               pte = mmu_memory_cache_alloc(cache);
+               pte = kvm_mmu_memory_cache_alloc(cache);
                kvm_pmd_populate(pmd, pte);
                get_page(virt_to_page(pmd));
        }
@@ -1343,7 +1312,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
-       struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, };
+       struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
 
        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);
@@ -1354,8 +1323,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                if (writable)
                        pte = kvm_s2pte_mkwrite(pte);
 
-               ret = mmu_topup_memory_cache(&cache,
-                                            kvm_mmu_cache_min_pages(kvm));
+               ret = kvm_mmu_topup_memory_cache(&cache,
+                                                kvm_mmu_cache_min_pages(kvm));
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
@@ -1369,7 +1338,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
        }
 
 out:
-       mmu_free_memory_cache(&cache);
+       kvm_mmu_free_memory_cache(&cache);
        return ret;
 }
 
@@ -1735,7 +1704,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        up_read(&current->mm->mmap_sem);
 
        /* We need minimum second+third level pages */
-       ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
+       ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
        if (ret)
                return ret;
 
@@ -2158,7 +2127,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
-- 
2.26.0
