The access control for SME follows the same structure as for the base FP
and SVE extensions: it is controlled via CPACR_ELx.SMEN and CPTR_EL2.TSM,
which mirror the equivalent FPSIMD and SVE controls in those registers.
Add handling for these controls and their exceptions, mirroring the
existing handling for FPSIMD and SVE.
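
For reference, a sketch of the resulting trap configuration with the
architectural bit positions noted (illustrative only; this condenses the
hunks below and adds no behaviour of its own):

	/* nVHE: setting CPTR_EL2.TSM (bit 12) traps SME to EL2 */
	if (!vcpu_has_sme(vcpu) || !guest_owns_fp_regs())
		val |= CPTR_EL2_TSM;

	/* VHE: CPACR_EL1.SMEN (bits [25:24]) must be 0b11 to avoid traps */
	if (vcpu_has_sme(vcpu))
		val |= CPACR_EL1_SMEN;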

Signed-off-by: Mark Brown <broo...@kernel.org>
---
 arch/arm64/kvm/handle_exit.c            | 14 ++++++++++++++
 arch/arm64/kvm/hyp/include/hyp/switch.h | 11 ++++++-----
 arch/arm64/kvm/hyp/nvhe/switch.c        |  4 +++-
 arch/arm64/kvm/hyp/vhe/switch.c         | 17 ++++++++++++-----
 4 files changed, 35 insertions(+), 11 deletions(-)
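
As an illustration of the guest-visible effect (hypothetical test
fragment, not part of this series; assumes an SME-aware toolchain and a
vCPU created without SME):

	/*
	 * Guest code: with SME not enabled for this vCPU, entering
	 * streaming mode takes ESR_ELx_EC_SME at EL2 and handle_sme()
	 * reflects it back to the guest as an UNDEF.
	 */
	static inline void guest_try_smstart(void)
	{
		asm volatile("smstart sm");
	}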

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index a598072f36d2..d96f3a585d70 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -232,6 +232,19 @@ static int handle_sve(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+/*
+ * Guest access to SME registers should be routed to this handler only
+ * when the system doesn't support SME.
+ */
+static int handle_sme(struct kvm_vcpu *vcpu)
+{
+       if (guest_hyp_sme_traps_enabled(vcpu))
+               return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
 /*
  * Two possibilities to handle a trapping ptrauth instruction:
  *
@@ -385,6 +398,7 @@ static exit_handle_fn arm_exit_handlers[] = {
        [ESR_ELx_EC_SVC64]      = handle_svc,
        [ESR_ELx_EC_SYS64]      = kvm_handle_sys_reg,
        [ESR_ELx_EC_SVE]        = handle_sve,
+       [ESR_ELx_EC_SME]        = handle_sme,
        [ESR_ELx_EC_ERET]       = kvm_handle_eret,
        [ESR_ELx_EC_IABT_LOW]   = kvm_handle_guest_abort,
        [ESR_ELx_EC_DABT_LOW]   = kvm_handle_guest_abort,
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index c128b4d25a2d..9375afa96b71 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -69,11 +69,8 @@ static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
 {
        u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;
 
-       /*
-        * Always trap SME since it's not supported in KVM.
-        * TSM is RES1 if SME isn't implemented.
-        */
-       val |= CPTR_EL2_TSM;
+       if (!vcpu_has_sme(vcpu) || !guest_owns_fp_regs())
+               val |= CPTR_EL2_TSM;
 
        if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
                val |= CPTR_EL2_TZ;
@@ -101,6 +98,8 @@ static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
                val |= CPACR_EL1_FPEN;
                if (vcpu_has_sve(vcpu))
                        val |= CPACR_EL1_ZEN;
+               if (vcpu_has_sme(vcpu))
+                       val |= CPACR_EL1_SMEN;
        }
 
        if (!vcpu_has_nv(vcpu))
@@ -142,6 +141,8 @@ static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
                val &= ~CPACR_EL1_FPEN;
        if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
                val &= ~CPACR_EL1_ZEN;
+       if (!(SYS_FIELD_GET(CPACR_EL1, SMEN, cptr) & BIT(0)))
+               val &= ~CPACR_EL1_SMEN;
 
        if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
                val |= cptr & CPACR_EL1_E0POE;
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index ccd575d5f6de..79a3e5c290f9 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -175,6 +175,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
        [ESR_ELx_EC_SYS64]              = kvm_hyp_handle_sysreg,
        [ESR_ELx_EC_SVE]                = kvm_hyp_handle_fpsimd,
+       [ESR_ELx_EC_SME]                = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
@@ -186,7 +187,8 @@ static const exit_handler_fn pvm_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_SYS64]              = kvm_handle_pvm_sys64,
        [ESR_ELx_EC_SVE]                = kvm_handle_pvm_restricted,
-       [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
+       [ESR_ELx_EC_SME]                = kvm_handle_pvm_restricted,
+       [ESR_ELx_EC_FP_ASIMD]           = kvm_handle_pvm_restricted,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
        [ESR_ELx_EC_WATCHPT_LOW]        = kvm_hyp_handle_watchpt_low,
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index e482181c6632..86a892966a18 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -448,22 +448,28 @@ static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
        return true;
 }
 
-static bool kvm_hyp_handle_zcr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_vec_cr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 
        if (!vcpu_has_nv(vcpu))
                return false;
 
-       if (sysreg != SYS_ZCR_EL2)
+       switch (sysreg) {
+       case SYS_ZCR_EL2:
+       case SYS_SMCR_EL2:
+               break;
+       default:
                return false;
+       }
 
        if (guest_owns_fp_regs())
                return false;
 
        /*
-        * ZCR_EL2 traps are handled in the slow path, with the expectation
-        * that the guest's FP context has already been loaded onto the CPU.
+        * ZCR_EL2 and SMCR_EL2 traps are handled in the slow path,
+        * with the expectation that the guest's FP context has
+        * already been loaded onto the CPU.
         *
         * Load the guest's FP context and unconditionally forward to the
         * slow path for handling (i.e. return false).
@@ -483,7 +489,7 @@ static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
                return true;
 
-       if (kvm_hyp_handle_zcr_el2(vcpu, exit_code))
+       if (kvm_hyp_handle_vec_cr_el2(vcpu, exit_code))
                return true;
 
        return kvm_hyp_handle_sysreg(vcpu, exit_code);
@@ -512,6 +518,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
        [ESR_ELx_EC_SYS64]              = kvm_hyp_handle_sysreg_vhe,
+       [ESR_ELx_EC_SME]                = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_SVE]                = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,

-- 
2.39.5
