If running a NV guest on an ARMv8.4-NV capable system, let's
allocate an additional page that will be used by the hypervisor
to fulfill system register accesses.

Signed-off-by: Marc Zyngier <[email protected]>
---
 arch/arm64/include/asm/kvm_host.h |  3 ++-
 arch/arm64/kvm/nested.c           | 10 ++++++++++
 arch/arm64/kvm/reset.c            |  1 +
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3d4543b0c116..27397ecf9a23 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -554,7 +554,8 @@ struct kvm_vcpu_arch {
  */
 static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 {
-       if (unlikely(r >= __VNCR_START__ && ctxt->vncr_array))
+       if (unlikely(cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
+                    r >= __VNCR_START__ && ctxt->vncr_array))
                return &ctxt->vncr_array[r - __VNCR_START__];
 
        return (u64 *)&ctxt->sys_regs[r];
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index d48b37f034b8..4ffbc14d0245 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -47,6 +47,14 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
        if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
                return -EINVAL;
 
+       if (cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT)) {
+               if (!vcpu->arch.ctxt.vncr_array)
+                       vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+               if (!vcpu->arch.ctxt.vncr_array)
+                       return -ENOMEM;
+       }
+
        mutex_lock(&kvm->lock);
 
        /*
@@ -64,6 +72,8 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
                    kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
                        kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
                        kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
+                       free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
+                       vcpu->arch.ctxt.vncr_array = NULL;
                } else {
                        kvm->arch.nested_mmus_size = num_mmus;
                        ret = 0;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 55863e8f4b0c..a18172b71829 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -146,6 +146,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        kfree(vcpu->arch.sve_state);
+       free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
-- 
2.29.2

_______________________________________________
kvmarm mailing list
[email protected]
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to