Move x86's memory cache helpers to common KVM code so that they can be
reused by arm64 and MIPS in future patches.
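
For reference, a rough sketch of how a consuming architecture would opt
in and use the common helpers; the cache field, function names, and the
minimum object count below are illustrative assumptions for this sketch,
not part of this patch:

  /*
   * Hypothetical arch header (e.g. asm/kvm_types.h): defining the
   * capacity of kvm_mmu_memory_cache.objects[] opts the arch in to
   * the common helpers.
   */
  #define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40

  /*
   * Called from a sleepable context, before acquiring mmu_lock: fill
   * the per-vCPU cache so that allocations under the lock can't fail.
   */
  static int arch_topup_fault_caches(struct kvm_vcpu *vcpu)
  {
          /* '4' is an illustrative minimum, not taken from this patch. */
          return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, 4);
  }

  /* With mmu_lock held (atomic context), pop a pre-allocated object. */
  static void *arch_alloc_page_table(struct kvm_vcpu *vcpu)
  {
          return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
  }

A cache that needs zeroed objects sets mc->gfp_zero to __GFP_ZERO when
it is initialized, and one backed by a dedicated slab sets
mc->kmem_cache; otherwise the helpers hand out whole pages from
__get_free_page().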

Suggested-by: Christoffer Dall <christoffer.d...@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
 arch/x86/kvm/mmu/mmu.c   | 53 --------------------------------------
 include/linux/kvm_host.h |  7 +++++
 virt/kvm/kvm_main.c      | 55 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 53 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b85d3e8e8403..a627437f73fd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1060,47 +1060,6 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
        local_irq_enable();
 }
 
-static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
-                                              gfp_t gfp_flags)
-{
-       gfp_flags |= mc->gfp_zero;
-
-       if (mc->kmem_cache)
-               return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
-       else
-               return (void *)__get_free_page(gfp_flags);
-}
-
-static int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
-{
-       void *obj;
-
-       if (mc->nobjs >= min)
-               return 0;
-       while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
-               obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
-               if (!obj)
-                       return mc->nobjs >= min ? 0 : -ENOMEM;
-               mc->objects[mc->nobjs++] = obj;
-       }
-       return 0;
-}
-
-static int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
-{
-       return mc->nobjs;
-}
-
-static void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-       while (mc->nobjs) {
-               if (mc->kmem_cache)
-                       kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
-               else
-                       free_page((unsigned long)mc->objects[--mc->nobjs]);
-       }
-}
-
 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
        int r;
@@ -1132,18 +1091,6 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-       void *p;
-
-       if (WARN_ON(!mc->nobjs))
-               p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
-       else
-               p = mc->objects[--mc->nobjs];
-       BUG_ON(!p);
-       return p;
-}
-
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
        return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d38d6b9c24be..802b9e2306f0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -815,6 +815,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+#endif
+
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                                 struct kvm_vcpu *except,
                                 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4db151f6101e..fead5f1d5594 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -342,6 +342,61 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
        kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
+                                              gfp_t gfp_flags)
+{
+       gfp_flags |= mc->gfp_zero;
+
+       if (mc->kmem_cache)
+               return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
+       else
+               return (void *)__get_free_page(gfp_flags);
+}
+
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+{
+       void *obj;
+
+       if (mc->nobjs >= min)
+               return 0;
+       while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
+               obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
+               if (!obj)
+                       return mc->nobjs >= min ? 0 : -ENOMEM;
+               mc->objects[mc->nobjs++] = obj;
+       }
+       return 0;
+}
+
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
+{
+       return mc->nobjs;
+}
+
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+       while (mc->nobjs) {
+               if (mc->kmem_cache)
+                       kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
+               else
+                       free_page((unsigned long)mc->objects[--mc->nobjs]);
+       }
+}
+
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+       void *p;
+
+       if (WARN_ON(!mc->nobjs))
+               p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
+       else
+               p = mc->objects[--mc->nobjs];
+       BUG_ON(!p);
+       return p;
+}
+#endif
+
 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
        mutex_init(&vcpu->mutex);
-- 
2.26.0
