Richard Henderson <richard.hender...@linaro.org> writes:

> We had completely run out of TBFLAG bits.
> Split A- and M-profile bits into two overlapping buckets.
> This results in 4 free bits.
>
> Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
> ---
>  target/arm/cpu.h       | 52 ++++++++++++++++++++++++---------------
>  target/arm/helper.c    | 17 ++++++-------
>  target/arm/translate.c | 56 +++++++++++++++++++++++-------------------
>  3 files changed, 70 insertions(+), 55 deletions(-)
>
> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index 28259be733..ae9fc1ded3 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -3188,38 +3188,50 @@ FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
>   */
>  FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)

I'm not sure if this visual aid helps, but here you go:

 *  31                  20 19    16 15         10 9            0
 * +----------------------+--------+-------------+--------------+
 * |                      |        |       TBFLAG_A64           |
 * |                      |     +--+-------------+--------------+
 * |     TBFLAG_ANY       |     |   TBFLAG_A32   |              |
 * |                      |     +-----+----------+  TBFLAG_AM32 |
 * |                      |           |TBFLAG_M32|              |
 * +----------------------+-----------+----------+--------------+
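
In case the macro plumbing isn't fresh in anyone's mind, here is a
stand-alone sketch of why the overlap is safe.  The FIELD/FIELD_DP32/
FIELD_EX32 definitions below are simplified stand-ins for the ones in
hw/registerfields.h (the real ones generate R_*_SHIFT/MASK enums and go
via extract32()/deposit32()), and only a couple of the fields from the
patch are reproduced:

/* Simplified stand-ins for hw/registerfields.h, for illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FIELD(reg, field, shift, length) \
    enum { reg##_##field##_SHIFT = (shift), reg##_##field##_LENGTH = (length) };

#define FIELD_MASK32(reg, field) \
    ((uint32_t)((1u << reg##_##field##_LENGTH) - 1) << reg##_##field##_SHIFT)

#define FIELD_DP32(val, reg, field, x) \
    (((val) & ~FIELD_MASK32(reg, field)) | \
     (((uint32_t)(x) << reg##_##field##_SHIFT) & FIELD_MASK32(reg, field)))

#define FIELD_EX32(val, reg, field) \
    (((val) & FIELD_MASK32(reg, field)) >> reg##_##field##_SHIFT)

/* Shared by both profiles: bits [8:0] */
FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)
FIELD(TBFLAG_AM32, THUMB, 8, 1)

/* A-profile only, from bit 9 up... */
FIELD(TBFLAG_A32, VECLEN, 9, 3)

/*
 * ...and M-profile only, reusing the same bits.  That is fine because a
 * given CPU is either A-profile or M-profile, never both, so only one of
 * the two decodings is ever applied to a given flags word.
 */
FIELD(TBFLAG_M32, HANDLER, 9, 1)
FIELD(TBFLAG_M32, STACKCHECK, 10, 1)

int main(void)
{
    uint32_t flags = 0;

    /* An M-profile CPU packs its state with the shared and M32 names... */
    flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, 1);
    flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
    flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);

    /* ...and the translator only ever unpacks it with the same names. */
    assert(FIELD_EX32(flags, TBFLAG_AM32, THUMB) == 1);
    assert(FIELD_EX32(flags, TBFLAG_M32, HANDLER) == 1);
    assert(FIELD_EX32(flags, TBFLAG_M32, STACKCHECK) == 1);

    /*
     * Reading the same word with the A-profile names shows the overlap:
     * HANDLER and STACKCHECK land in VECLEN's bits (prints 3 here).
     */
    printf("A-profile view of VECLEN: %u\n",
           (unsigned)FIELD_EX32(flags, TBFLAG_A32, VECLEN));
    return 0;
}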

>  
> -/* Bit usage when in AArch32 state: */
> -FIELD(TBFLAG_A32, THUMB, 0, 1)          /* Not cached. */
> -FIELD(TBFLAG_A32, VECLEN, 1, 3)         /* Not cached. */
> -FIELD(TBFLAG_A32, VECSTRIDE, 4, 2)      /* Not cached. */
> +/*
> + * Bit usage when in AArch32 state, both A- and M-profile.
> + */
> +FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)      /* Not cached. */
> +FIELD(TBFLAG_AM32, THUMB, 8, 1)         /* Not cached. */
> +
> +/*
> + * Bit usage when in AArch32 state, for A-profile only.
> + */
> +FIELD(TBFLAG_A32, VECLEN, 9, 3)         /* Not cached. */
> +FIELD(TBFLAG_A32, VECSTRIDE, 12, 2)     /* Not cached. */
>  /*
>   * We store the bottom two bits of the CPAR as TB flags and handle
>   * checks on the other bits at runtime. This shares the same bits as
>   * VECSTRIDE, which is OK as no XScale CPU has VFP.
>   * Not cached, because VECLEN+VECSTRIDE are not cached.
>   */
> -FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
> +FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
> +FIELD(TBFLAG_A32, VFPEN, 14, 1)         /* Partially cached, minus FPEXC. */
> +FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
>  /*
>   * Indicates whether cp register reads and writes by guest code should access
>   * the secure or nonsecure bank of banked registers; note that this is not
>   * the same thing as the current security state of the processor!
>   */
> -FIELD(TBFLAG_A32, NS, 6, 1)
> -FIELD(TBFLAG_A32, VFPEN, 7, 1)          /* Partially cached, minus FPEXC. */
> -FIELD(TBFLAG_A32, CONDEXEC, 8, 8)       /* Not cached. */
> -FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
> -/* For M profile only, set if FPCCR.LSPACT is set */
> -FIELD(TBFLAG_A32, LSPACT, 18, 1)        /* Not cached. */
> -/* For M profile only, set if we must create a new FP context */
> -FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1) /* Not cached. */
> -/* For M profile only, set if FPCCR.S does not match current security state */
> -FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1) /* Not cached. */
> -/* For M profile only, Handler (ie not Thread) mode */
> -FIELD(TBFLAG_A32, HANDLER, 21, 1)
> -/* For M profile only, whether we should generate stack-limit checks */
> -FIELD(TBFLAG_A32, STACKCHECK, 22, 1)
> +FIELD(TBFLAG_A32, NS, 16, 1)
>  
> -/* Bit usage when in AArch64 state */
> +/*
> + * Bit usage when in AArch32 state, for M-profile only.
> + */
> +/* Handler (ie not Thread) mode */
> +FIELD(TBFLAG_M32, HANDLER, 9, 1)
> +/* Whether we should generate stack-limit checks */
> +FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
> +/* Set if FPCCR.LSPACT is set */
> +FIELD(TBFLAG_M32, LSPACT, 11, 1)                 /* Not cached. */
> +/* Set if we must create a new FP context */
> +FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1)     /* Not cached. */
> +/* Set if FPCCR.S does not match current security state */
> +FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1)          /* Not cached. */
> +
> +/*
> + * Bit usage when in AArch64 state
> + */
>  FIELD(TBFLAG_A64, TBII, 0, 2)
>  FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
>  FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index 5172843667..ec5c7fa325 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -11207,11 +11207,8 @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
>  {
>      uint32_t flags = 0;
>  
> -    /* v8M always enables the fpu.  */
> -    flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
> -
>      if (arm_v7m_is_handler_mode(env)) {
> -        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
> +        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
>      }
>  
>      /*
> @@ -11222,7 +11219,7 @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
>      if (arm_feature(env, ARM_FEATURE_V8) &&
>          !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
>            (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
> -        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
> +        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
>      }
>  
>      return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
> @@ -11385,7 +11382,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
>              if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
>                  FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
>                  != env->v7m.secure) {
> -                flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
> +                flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
>              }
>  
>              if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
> @@ -11397,12 +11394,12 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
>                   * active FP context; we must create a new FP context before
>                   * executing any FP insn.
>                   */
> -                flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
> +                flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
>              }
>  
>              bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
>              if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
> -                flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
> +                flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
>              }
>          } else {
>              /*
> @@ -11423,8 +11420,8 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
>              }
>          }
>  
> -        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
> -        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
> +        flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
> +        flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
>          pstate_for_ss = env->uncached_cpsr;
>      }
>  
> diff --git a/target/arm/translate.c b/target/arm/translate.c
> index 51ea99e6f9..cd757165e1 100644
> --- a/target/arm/translate.c
> +++ b/target/arm/translate.c
> @@ -10841,37 +10841,46 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
>       */
>      dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
>                                 !arm_el_is_aa64(env, 3);
> -    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
> -    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
> -    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
> -    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
> +    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
> +    condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
>      dc->condexec_mask = (condexec & 0xf) << 1;
>      dc->condexec_cond = condexec >> 4;
> +
>      core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
>      dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
>      dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
>  #if !defined(CONFIG_USER_ONLY)
>      dc->user = (dc->current_el == 0);
>  #endif
> -    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
>      dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
> -    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
> -    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
> -    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
> -        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
> -        dc->vec_stride = 0;
> +
> +    if (arm_feature(env, ARM_FEATURE_M)) {
> +        dc->vfp_enabled = 1;
> +        dc->be_data = MO_TE;
> +        dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
> +        dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
> +            regime_is_secure(env, dc->mmu_idx);
> +        dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
> +        dc->v8m_fpccr_s_wrong =
> +            FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
> +        dc->v7m_new_fp_ctxt_needed =
> +            FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
> +        dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
>      } else {
> -        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
> -        dc->c15_cpar = 0;
> +        dc->be_data =
> +            FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
> +        dc->debug_target_el =
> +            FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
> +        dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
> +        dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
> +        dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
> +        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
> +            dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
> +        } else {
> +            dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
> +            dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
> +        }
>      }
> -    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
> -    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
> -        regime_is_secure(env, dc->mmu_idx);
> -    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
> -    dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
> -    dc->v7m_new_fp_ctxt_needed =
> -        FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
> -    dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
>      dc->cp_regs = cpu->cp_regs;
>      dc->features = env->features;
>  
> @@ -10893,9 +10902,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
>      dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
>      dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
>      dc->is_ldex = false;
> -    if (!arm_feature(env, ARM_FEATURE_M)) {
> -        dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
> -    }
>  
>      dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
>  
> @@ -11332,10 +11338,10 @@ static const TranslatorOps thumb_translator_ops = {
>  /* generate intermediate code for basic block 'tb'.  */
>  void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
>  {
> -    DisasContext dc;
> +    DisasContext dc = { };

We seem to have gained an initialiser here, which looks unrelated to the
rest of the patch.
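
For the record, the functional difference is only that the empty braces
zero-fill every field of dc instead of leaving them indeterminate.  A
minimal illustration, with a made-up struct standing in for DisasContext
(`= { }` itself is a GCC extension; strictly conforming older C would
spell it `= { 0 }`):

#include <stdio.h>

/* Made-up stand-in for DisasContext, just to show what the initialiser does. */
struct ctx {
    int vec_len;
    int vec_stride;
    int c15_cpar;
};

int main(void)
{
    struct ctx a;           /* fields are indeterminate until assigned */
    struct ctx b = { };     /* every field starts out as zero */

    printf("%d %d %d\n", b.vec_len, b.vec_stride, b.c15_cpar);  /* 0 0 0 */
    (void)a;                /* reading a's fields here would be undefined */
    return 0;
}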

Otherwise:

Reviewed-by: Alex Bennée <alex.ben...@linaro.org>


>      const TranslatorOps *ops = &arm_translator_ops;
>  
> -    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
> +    if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
>          ops = &thumb_translator_ops;
>      }
>  #ifdef TARGET_AARCH64


-- 
Alex Bennée
