Knowing the number of CPUs is necessary for determining the boundaries
of per-cpu variables, which will be used for upcoming hypervisor
tracing. hyp_nr_cpus, which stores this value, is only initialised for
the pKVM hypervisor. Make it accessible for the nVHE hypervisor as well.

With the kernel now responsible for initialising hyp_nr_cpus, the
nr_cpus parameter is no longer needed in __pkvm_init.

Signed-off-by: Vincent Donnefort <[email protected]>

diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index e6be1f5d0967..b169d85458f9 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -128,8 +128,7 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 #ifdef __KVM_NVHE_HYPERVISOR__
 void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
                void (*fn)(void));
-int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
-               unsigned long *per_cpu_base, u32 hyp_va_bits);
+int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long *per_cpu_base, u32 hyp_va_bits);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 #endif
 
@@ -146,5 +145,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
 extern unsigned long kvm_nvhe_sym(__icache_flags);
 extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
 extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
+extern unsigned long kvm_nvhe_sym(hyp_nr_cpus);
 
 #endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 052bf0d4d0b0..7ba427f9608c 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -35,6 +35,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
 #include <asm/kvm_pkvm.h>
@@ -2396,7 +2397,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
        preempt_disable();
        cpu_hyp_init_context();
        ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
-                               num_possible_cpus(), kern_hyp_va(per_cpu_base),
+                               kern_hyp_va(per_cpu_base),
                                hyp_va_bits);
        cpu_hyp_init_features();
 
@@ -2605,6 +2606,8 @@ static int __init init_hyp_mode(void)
                kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
        }
 
+       kvm_nvhe_sym(hyp_nr_cpus) = num_possible_cpus();
+
        /*
         * Map the Hyp-code called directly from the host
         */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 5f9d56754e39..f8a7b8c04c49 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -30,8 +30,6 @@ enum pkvm_component_id {
        PKVM_ID_FFA,
 };
 
-extern unsigned long hyp_nr_cpus;
-
 int __pkvm_prot_finalize(void);
 int __pkvm_host_share_hyp(u64 pfn);
 int __pkvm_host_unshare_hyp(u64 pfn);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 29430c031095..34546dce57ff 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -482,17 +482,15 @@ static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
        DECLARE_REG(unsigned long, size, host_ctxt, 2);
-       DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
-       DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
-       DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);
+       DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 3);
+       DECLARE_REG(u32, hyp_va_bits, host_ctxt, 4);
 
        /*
         * __pkvm_init() will return only if an error occurred, otherwise it
         * will tail-call in __pkvm_init_finalise() which will have to deal
         * with the host context directly.
         */
-       cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
-                                           hyp_va_bits);
+       cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, per_cpu_base, hyp_va_bits);
 }
 
 static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 90bd014e952f..d8e5b563fd3d 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -341,8 +341,7 @@ void __noreturn __pkvm_init_finalise(void)
        __host_enter(host_ctxt);
 }
 
-int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
-               unsigned long *per_cpu_base, u32 hyp_va_bits)
+int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long *per_cpu_base, u32 hyp_va_bits)
 {
        struct kvm_nvhe_init_params *params;
        void *virt = hyp_phys_to_virt(phys);
@@ -355,7 +354,6 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
                return -EINVAL;
 
        hyp_spin_lock_init(&pkvm_pgd_lock);
-       hyp_nr_cpus = nr_cpus;
 
        ret = divide_memory_pool(virt, size);
        if (ret)
-- 
2.52.0.107.ga0afd4fd5b-goog


Reply via email to