Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/tcg/cpu64.c        | 10 ++++++++--
 docs/system/arm/emulation.rst |  6 ++++++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index 5d8ed2794d..f73729926b 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -1194,7 +1194,7 @@ void aarch64_max_tcg_initfn(Object *obj)
      */
     t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
     t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0);  /* FEAT_RASv1p1 + FEAT_DoubleFault */
-    t = FIELD_DP64(t, ID_AA64PFR1, SME, 1);       /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64PFR1, SME, 2);       /* FEAT_SME2 */
     t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */
     t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1);       /* FEAT_NMI */
     cpu->isar.id_aa64pfr1 = t;
@@ -1245,10 +1245,11 @@ void aarch64_max_tcg_initfn(Object *obj)
     cpu->isar.id_aa64mmfr3 = t;
 
     t = cpu->isar.id_aa64zfr0;
-    t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
+    t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 2);    /* FEAT_SVE2p1 */
     t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);       /* FEAT_SVE_PMULL128 */
     t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);   /* FEAT_SVE_BitPerm */
     t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 2);  /* FEAT_BF16, FEAT_EBF16 */
+    t = FIELD_DP64(t, ID_AA64ZFR0, B16B16, 1);    /* FEAT_SVE_B16B16 */
     t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);      /* FEAT_SVE_SHA3 */
     t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);       /* FEAT_SVE_SM4 */
     t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);      /* FEAT_I8MM */
@@ -1264,11 +1265,16 @@ void aarch64_max_tcg_initfn(Object *obj)
 
     t = cpu->isar.id_aa64smfr0;
     t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, BI32I32, 1);  /* FEAT_SME2 */
     t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1);   /* FEAT_SME */
     t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1);   /* FEAT_SME */
     t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf);  /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, F16F16, 1);   /* FEAT_SME_F16F16 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, B16B16, 1);   /* FEAT_SME_B16B16 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, I16I32, 5);   /* FEAT_SME2 */
     t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1);   /* FEAT_SME_F64F64 */
     t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, SMEVER, 2);   /* FEAT_SME2p1 */
     t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1);     /* FEAT_SME_FA64 */
     cpu->isar.id_aa64smfr0 = t;
 
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 78c2fd2113..890dc6fee2 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -129,16 +129,22 @@ the following architecture extensions:
 - FEAT_SM3 (Advanced SIMD SM3 instructions)
 - FEAT_SM4 (Advanced SIMD SM4 instructions)
 - FEAT_SME (Scalable Matrix Extension)
+- FEAT_SME2 (Scalable Matrix Extension version 2)
+- FEAT_SME2p1 (Scalable Matrix Extension version 2.1)
+- FEAT_SME_B16B16 (Non-widening BFloat16 arithmetic for SME2)
 - FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode)
+- FEAT_SME_F16F16 (Non-widening half-precision FP16 arithmetic for SME2)
 - FEAT_SME_F64F64 (Double-precision floating-point outer product instructions)
 - FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
 - FEAT_SVE (Scalable Vector Extension)
 - FEAT_SVE_AES (Scalable Vector AES instructions)
+- FEAT_SVE_B16B16 (Non-widening BFloat16 arithmetic for SVE2)
 - FEAT_SVE_BitPerm (Scalable Vector Bit Permutes instructions)
 - FEAT_SVE_PMULL128 (Scalable Vector PMULL instructions)
 - FEAT_SVE_SHA3 (Scalable Vector SHA3 instructions)
 - FEAT_SVE_SM4 (Scalable Vector SM4 instructions)
 - FEAT_SVE2 (Scalable Vector Extension version 2)
+- FEAT_SVE2p1 (Scalable Vector Extension version 2.1)
 - FEAT_SPECRES (Speculation restriction instructions)
 - FEAT_SSBS (Speculative Store Bypass Safe)
 - FEAT_SSBS2 (MRS and MSR instructions for SSBS version 2)
-- 
2.43.0