As all the VNCR-capable system registers are nicely separated
from the rest of the crowd, let's set HCR_EL2.NV2 and get
the ball rolling.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/include/asm/kvm_arm.h     |  1 +
 arch/arm64/include/asm/kvm_emulate.h | 23 +++++++++++++----------
 arch/arm64/include/asm/sysreg.h      |  1 +
 arch/arm64/kvm/hyp/switch.c          | 18 +++++++++++++++---
 4 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 2e5be806a5c9..d558f8ebea7e 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -13,6 +13,7 @@
 
 /* Hyp Configuration Register (HCR) bits */
 #define HCR_FWB                (UL(1) << 46)
+#define HCR_NV2                (UL(1) << 45)
 #define HCR_AT         (UL(1) << 44)
 #define HCR_NV1                (UL(1) << 43)
 #define HCR_NV         (UL(1) << 42)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6df684e1790e..b87757f21c42 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -281,21 +281,24 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
 
 static inline u64 __fixup_spsr_el2_write(struct kvm_cpu_context *ctxt, u64 val)
 {
-       if (!__vcpu_el2_e2h_is_set(ctxt)) {
-               /*
-                * Clear the .M field when writing SPSR to the CPU, so that we
-                * can detect when the CPU clobbered our SPSR copy during a
-                * local exception.
-                */
-               val &= ~0xc;
-       }
+       struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+       if (enhanced_nested_virt_in_use(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
+               return val;
 
-       return val;
+       /*
+        * Clear the .M field when writing SPSR to the CPU, so that we
+        * can detect when the CPU clobbered our SPSR copy during a
+        * local exception.
+        */
+       return val &= ~0xc;
 }
 
 static inline u64 __fixup_spsr_el2_read(const struct kvm_cpu_context *ctxt, u64 val)
 {
-       if (__vcpu_el2_e2h_is_set(ctxt))
+       struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+       if (enhanced_nested_virt_in_use(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
                return val;
 
        /*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index a402e762c51d..6a466bc66901 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -425,6 +425,7 @@
 #define SYS_TCR_EL2                    sys_reg(3, 4, 2, 0, 2)
 #define SYS_VTTBR_EL2                  sys_reg(3, 4, 2, 1, 0)
 #define SYS_VTCR_EL2                   sys_reg(3, 4, 2, 1, 2)
+#define SYS_VNCR_EL2                   sys_reg(3, 4, 2, 2, 0)
 
 #define SYS_DACR32_EL2                 sys_reg(3, 4, 3, 0, 0)
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index c35e67241d8e..2eca04ca96b6 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -160,7 +160,13 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
                         * the EL1 virtual memory control register accesses
                         * as well as the AT S1 operations.
                         */
-                       hcr |= HCR_TVM | HCR_TRVM | HCR_AT | HCR_TTLB | HCR_NV1;
+                       if (enhanced_nested_virt_in_use(vcpu)) {
+                               hcr &= ~HCR_TVM;
+                       } else {
+                               hcr |= HCR_TVM | HCR_TRVM | HCR_TTLB;
+                       }
+
+                       hcr |= HCR_AT | HCR_NV1;
                } else {
                        /*
                         * For a guest hypervisor on v8.1 (VHE), allow to
@@ -186,12 +192,18 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 
                        /*
                         * If we're using the EL1 translation regime
-                        * (TGE clear, then ensure that AT S1 ops are
-                        * trapped too.
+                        * (TGE clear), then ensure that AT S1 and
+                        * TLBI E1 ops are trapped too.
                         */
                        if (!vcpu_el2_tge_is_set(vcpu))
                                hcr |= HCR_AT | HCR_TTLB;
                }
+
+               if (enhanced_nested_virt_in_use(vcpu)) {
+                       hcr |= HCR_AT | HCR_TTLB | HCR_NV2;
+                       write_sysreg_s(vcpu->arch.ctxt.vncr_array,
+                                      SYS_VNCR_EL2);
+               }
        } else if (nested_virt_in_use(vcpu)) {
                hcr |= __vcpu_sys_reg(vcpu, HCR_EL2);
        }
-- 
2.20.1

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to