Rename the memory helpers that will soon be moved to common code and be
made globally available via linux/kvm_host.h.  "mmu" alone is not a
sufficient namespace for globally available KVM symbols.

Opportunistically add "nr_" in mmu_memory_cache_free_objects() to make
it clear the function returns the number of free objects, as opposed to
freeing existing objects.

Suggested-by: Christoffer Dall <christoffer.d...@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
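For reference, once the helpers are moved to common code, the declarations
in include/linux/kvm_host.h are expected to look something like the sketch
below (names and signatures taken from this patch; the follow-up patch may
adjust them):

  /*
   * Hypothetical prototypes for the soon-to-be-common cache helpers,
   * mirroring the renamed x86 functions in the diff below.
   */
  int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
  int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
  void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
  void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
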
 arch/x86/kvm/mmu/mmu.c | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8d66cf558f1b..b85d3e8e8403 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1071,7 +1071,7 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
                return (void *)__get_free_page(gfp_flags);
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+static int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 {
        void *obj;
 
@@ -1086,12 +1086,12 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
        return 0;
 }
 
-static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *mc)
+static int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 {
        return mc->nobjs;
 }
 
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+static void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
        while (mc->nobjs) {
                if (mc->kmem_cache)
@@ -1106,33 +1106,33 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
        int r;
 
        /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
-       r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-                                  1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
+       r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+                                      1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
        if (r)
                return r;
-       r = mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
-                                  PT64_ROOT_MAX_LEVEL);
+       r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+                                      PT64_ROOT_MAX_LEVEL);
        if (r)
                return r;
        if (maybe_indirect) {
-               r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-                                          PT64_ROOT_MAX_LEVEL);
+               r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+                                              PT64_ROOT_MAX_LEVEL);
                if (r)
                        return r;
        }
-       return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
-                                     PT64_ROOT_MAX_LEVEL);
+       return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
+                                         PT64_ROOT_MAX_LEVEL);
 }
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-       mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
-       mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
-       mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+static void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 {
        void *p;
 
@@ -1146,7 +1146,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
+       return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
 }
 
 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -1417,7 +1417,7 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
        struct kvm_mmu_memory_cache *mc;
 
        mc = &vcpu->arch.mmu_pte_list_desc_cache;
-       return mmu_memory_cache_free_objects(mc);
+       return kvm_mmu_memory_cache_nr_free_objects(mc);
 }
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -2104,10 +2104,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
 {
        struct kvm_mmu_page *sp;
 
-       sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-       sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+       sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
+       sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
        if (!direct)
-       sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+       sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
        /*
-- 
2.26.0
