On Mon, Dec 2, 2024 at 1:49 PM Daniel Henrique Barboza <dbarb...@ventanamicro.com> wrote: > > > > On 12/2/24 6:15 PM, Atish Kumar Patra wrote: > > On Thu, Nov 28, 2024 at 4:53 AM Daniel Henrique Barboza > > <dbarb...@ventanamicro.com> wrote: > >> > >> > >> > >> On 11/17/24 10:15 PM, Atish Patra wrote: > >>> From: Kaiwen Xue <kaiw...@rivosinc.com> > >>> > >>> The Smcdeleg/Ssccfg adds the support for counter delegation via > >>> S*indcsr and Ssccfg. > >>> > >>> It also adds a new shadow CSR scountinhibit and menvcfg enable bit (CDE) > >>> to enable this extension and scountovf virtualization. > >>> > >>> Signed-off-by: Kaiwen Xue <kaiw...@rivosinc.com> > >>> Co-developed-by: Atish Patra <ati...@rivosinc.com> > >>> Signed-off-by: Atish Patra <ati...@rivosinc.com> > >>> --- > >>> target/riscv/csr.c | 300 > >>> +++++++++++++++++++++++++++++++++++++++++++++++++++-- > >>> 1 file changed, 289 insertions(+), 11 deletions(-) > >>> > >>> diff --git a/target/riscv/csr.c b/target/riscv/csr.c > >>> index 97cc8059ad37..31ea8b8ec20d 100644 > >>> --- a/target/riscv/csr.c > >>> +++ b/target/riscv/csr.c > >>> @@ -385,6 +385,21 @@ static RISCVException aia_smode32(CPURISCVState > >>> *env, int csrno) > >>> return smode32(env, csrno); > >>> } > >>> > >>> +static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno) > >>> +{ > >>> + RISCVCPU *cpu = env_archcpu(env); > >>> + > >>> + if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) { > >>> + return RISCV_EXCP_ILLEGAL_INST; > >>> + } > >>> + > >>> + if (env->virt_enabled) { > >>> + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; > >>> + } > >>> + > >>> + return smode(env, csrno); > >>> +} > >>> + > >>> static bool csrind_extensions_present(CPURISCVState *env) > >>> { > >>> return riscv_cpu_cfg(env)->ext_smcsrind || > >>> riscv_cpu_cfg(env)->ext_sscsrind; > >>> @@ -1222,10 +1237,9 @@ done: > >>> return result; > >>> } > >>> > >>> -static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, > >>> - target_ulong val) > >>> 
+static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, > >>> target_ulong val, > >>> + uint32_t ctr_idx) > >>> { > >>> - int ctr_idx = csrno - CSR_MCYCLE; > >>> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; > >>> uint64_t mhpmctr_val = val; > >>> > >>> @@ -1250,10 +1264,9 @@ static RISCVException > >>> write_mhpmcounter(CPURISCVState *env, int csrno, > >>> return RISCV_EXCP_NONE; > >>> } > >>> > >>> -static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, > >>> - target_ulong val) > >>> +static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, > >>> target_ulong val, > >>> + uint32_t ctr_idx) > >>> { > >>> - int ctr_idx = csrno - CSR_MCYCLEH; > >>> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; > >>> uint64_t mhpmctr_val = counter->mhpmcounter_val; > >>> uint64_t mhpmctrh_val = val; > >>> @@ -1275,6 +1288,20 @@ static RISCVException > >>> write_mhpmcounterh(CPURISCVState *env, int csrno, > >>> return RISCV_EXCP_NONE; > >>> } > >>> > >>> +static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong > >>> val) > >>> +{ > >>> + int ctr_idx = csrno - CSR_MCYCLE; > >>> + > >>> + return riscv_pmu_write_ctr(env, val, ctr_idx); > >>> +} > >>> + > >>> +static int write_mhpmcounterh(CPURISCVState *env, int csrno, > >>> target_ulong val) > >>> +{ > >>> + int ctr_idx = csrno - CSR_MCYCLEH; > >>> + > >>> + return riscv_pmu_write_ctrh(env, val, ctr_idx); > >>> +} > >>> + > >>> RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong > >>> *val, > >>> bool upper_half, uint32_t > >>> ctr_idx) > >>> { > >>> @@ -1340,6 +1367,167 @@ static RISCVException > >>> read_hpmcounterh(CPURISCVState *env, int csrno, > >>> return riscv_pmu_read_ctr(env, val, true, ctr_index); > >>> } > >>> > >>> +static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx, > >>> + target_ulong *val, target_ulong new_val, > >>> + target_ulong wr_mask) > >>> +{ > >>> + if (wr_mask != 0 && wr_mask != -1) { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + 
if (!wr_mask && val) { > >>> + riscv_pmu_read_ctr(env, val, false, ctr_idx); > >>> + } else if (wr_mask) { > >>> + riscv_pmu_write_ctr(env, new_val, ctr_idx); > >>> + } else { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + return 0; > >>> +} > >>> + > >>> +static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx, > >>> + target_ulong *val, target_ulong new_val, > >>> + target_ulong wr_mask) > >>> +{ > >>> + if (wr_mask != 0 && wr_mask != -1) { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + if (!wr_mask && val) { > >>> + riscv_pmu_read_ctr(env, val, true, ctr_idx); > >>> + } else if (wr_mask) { > >>> + riscv_pmu_write_ctrh(env, new_val, ctr_idx); > >>> + } else { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + return 0; > >>> +} > >>> + > >>> +static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index, > >>> + target_ulong *val, target_ulong new_val, > >>> + target_ulong wr_mask) > >>> +{ > >>> + uint64_t mhpmevt_val = new_val; > >>> + > >>> + if (wr_mask != 0 && wr_mask != -1) { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + if (!wr_mask && val) { > >>> + *val = env->mhpmevent_val[evt_index]; > >>> + if (riscv_cpu_cfg(env)->ext_sscofpmf) { > >>> + *val &= ~MHPMEVENT_BIT_MINH; > >>> + } > >>> + } else if (wr_mask) { > >>> + wr_mask &= ~MHPMEVENT_BIT_MINH; > >>> + mhpmevt_val = (new_val & wr_mask) | > >>> + (env->mhpmevent_val[evt_index] & ~wr_mask); > >>> + if (riscv_cpu_mxl(env) == MXL_RV32) { > >>> + mhpmevt_val = mhpmevt_val | > >>> + ((uint64_t)env->mhpmeventh_val[evt_index] << > >>> 32); > >>> + } > >>> + env->mhpmevent_val[evt_index] = mhpmevt_val; > >>> + riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); > >>> + } else { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + return 0; > >>> +} > >>> + > >>> +static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index, > >>> + target_ulong *val, target_ulong new_val, > >>> + target_ulong wr_mask) > >>> +{ > >>> + uint64_t mhpmevth_val; > >>> + uint64_t mhpmevt_val = 
env->mhpmevent_val[evt_index]; > >>> + > >>> + if (wr_mask != 0 && wr_mask != -1) { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + if (!wr_mask && val) { > >>> + *val = env->mhpmeventh_val[evt_index]; > >>> + if (riscv_cpu_cfg(env)->ext_sscofpmf) { > >>> + *val &= ~MHPMEVENTH_BIT_MINH; > >>> + } > >>> + } else if (wr_mask) { > >>> + wr_mask &= ~MHPMEVENTH_BIT_MINH; > >>> + env->mhpmeventh_val[evt_index] = > >>> + (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & > >>> ~wr_mask); > >>> + mhpmevth_val = env->mhpmeventh_val[evt_index]; > >>> + mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32); > >>> + riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); > >>> + } else { > >>> + return -EINVAL; > >>> + } > >>> + > >>> + return 0; > >>> +} > >>> + > >>> +static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, > >>> target_ulong *val, > >>> + target_ulong new_val, target_ulong wr_mask) > >>> +{ > >>> + switch (cfg_index) { > >>> + case 0: /* CYCLECFG */ > >>> + if (wr_mask) { > >>> + wr_mask &= ~MCYCLECFG_BIT_MINH; > >>> + env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & > >>> ~wr_mask); > >>> + } else { > >>> + *val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH; > >>> + } > >>> + break; > >>> + case 2: /* INSTRETCFG */ > >>> + if (wr_mask) { > >>> + wr_mask &= ~MINSTRETCFG_BIT_MINH; > >>> + env->minstretcfg = (new_val & wr_mask) | > >>> + (env->minstretcfg & ~wr_mask); > >>> + } else { > >>> + *val = env->minstretcfg &= ~MHPMEVENTH_BIT_MINH; > >>> + } > >>> + break; > >>> + default: > >>> + return -EINVAL; > >>> + } > >>> + return 0; > >>> +} > >>> + > >>> +static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, > >>> target_ulong *val, > >>> + target_ulong new_val, target_ulong wr_mask) > >>> +{ > >>> + > >>> + if (riscv_cpu_mxl(env) != MXL_RV32) { > >>> + return RISCV_EXCP_ILLEGAL_INST; > >>> + } > >>> + > >>> + switch (cfg_index) { > >>> + case 0: /* CYCLECFGH */ > >>> + if (wr_mask) { > >>> + wr_mask &= ~MCYCLECFGH_BIT_MINH; > >>> + 
env->mcyclecfgh = (new_val & wr_mask) | > >>> + (env->mcyclecfgh & ~wr_mask); > >>> + } else { > >>> + *val = env->mcyclecfgh; > >>> + } > >>> + break; > >>> + case 2: /* INSTRETCFGH */ > >>> + if (wr_mask) { > >>> + wr_mask &= ~MINSTRETCFGH_BIT_MINH; > >>> + env->minstretcfgh = (new_val & wr_mask) | > >>> + (env->minstretcfgh & ~wr_mask); > >>> + } else { > >>> + *val = env->minstretcfgh; > >>> + } > >>> + break; > >>> + default: > >>> + return -EINVAL; > >>> + } > >>> + return 0; > >>> +} > >>> + > >>> + > >>> static RISCVException read_scountovf(CPURISCVState *env, int csrno, > >>> target_ulong *val) > >>> { > >>> @@ -1349,6 +1537,14 @@ static RISCVException read_scountovf(CPURISCVState > >>> *env, int csrno, > >>> target_ulong *mhpm_evt_val; > >>> uint64_t of_bit_mask; > >>> > >>> + /* Virtualize scountovf for counter delegation */ > >>> + if (riscv_cpu_cfg(env)->ext_sscofpmf && > >>> + riscv_cpu_cfg(env)->ext_ssccfg && > >>> + get_field(env->menvcfg, MENVCFG_CDE) && > >>> + env->virt_enabled) { > >>> + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; > >>> + } > >>> + > >>> if (riscv_cpu_mxl(env) == MXL_RV32) { > >>> mhpm_evt_val = env->mhpmeventh_val; > >>> of_bit_mask = MHPMEVENTH_BIT_OF; > >>> @@ -2292,11 +2488,70 @@ static int rmw_xireg_cd(CPURISCVState *env, int > >>> csrno, > >>> target_ulong isel, target_ulong *val, > >>> target_ulong new_val, target_ulong wr_mask) > >>> { > >>> - if (!riscv_cpu_cfg(env)->ext_smcdeleg) { > >>> + int ret = -EINVAL; > >> > >> It seems like both 'ret' and the 'done' label are being used as shortcuts > >> to do > >> 'return ret', and every time 'ret' is assigned to something else can be > >> replaced > >> by an early 'return' exit. 
> >> > >> I would remove 'ret' and the 'done' label and: > >> > >> > >> > >>> + int ctr_index = isel - ISELECT_CD_FIRST; > >>> + int isel_hpm_start = ISELECT_CD_FIRST + 3; > >>> + > >>> + if (!riscv_cpu_cfg(env)->ext_smcdeleg || > >>> !riscv_cpu_cfg(env)->ext_ssccfg) { > >>> return RISCV_EXCP_ILLEGAL_INST; > >>> } > >>> - /* TODO: Implement the functionality later */ > >>> - return RISCV_EXCP_NONE; > >>> + > >>> + /* Invalid siselect value for reserved */ > >>> + if (ctr_index == 1) { > >>> + goto done; > >> > >> return -EINVAL; > >>> + } > >>> + > >>> + /* sireg4 and sireg5 provides access RV32 only CSRs */ > >>> + if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) && > >>> + (riscv_cpu_mxl(env) != MXL_RV32)) { > >>> + return RISCV_EXCP_ILLEGAL_INST; > >>> + } > >>> + > >>> + /* Check Sscofpmf dependancy */ > >>> + if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 && > >>> + (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) { > >>> + goto done; > >> > >> return -EINVAL; > >> > >>> + } > >>> + > >>> + /* Check smcntrpmf dependancy */ > >>> + if (!riscv_cpu_cfg(env)->ext_smcntrpmf && > >>> + (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) && > >>> + (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) { > >>> + goto done; > >> > >> return -EINVAL; > >> > >>> + } > >>> + > >>> + if (!get_field(env->mcounteren, BIT(ctr_index)) || > >>> + !get_field(env->menvcfg, MENVCFG_CDE)) { > >>> + goto done; > >> > >> return -EINVAL; > >> > >>> + } > >>> + > >>> + switch (csrno) { > >>> + case CSR_SIREG: > >>> + ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask); > >> > >> return rmw_cd_mhpmcounter(env, ctr_index, val, new_val, > >> wr_mask); > >>> + break; > >>> + case CSR_SIREG4: > >>> + ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask); > >> > >> return rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, > >> wr_mask); > >>> + break; > >>> + case CSR_SIREG2: > >>> + if (ctr_index <= 2) { > >>> + ret = rmw_cd_ctr_cfg(env, ctr_index, val, 
new_val, wr_mask); > >> > >> return rmw_cd_ctr_cfg(env, ctr_index, val, new_val, > >> wr_mask); > >>> + } else { > >>> + ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, > >>> wr_mask); > >> > >> return rmw_cd_mhpmevent(env, ctr_index, val, new_val, > >> wr_mask); > >> > >>> + } > >>> + break; > >>> + case CSR_SIREG5: > >>> + if (ctr_index <= 2) { > >>> + ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask); > >> > >> return rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, > >> wr_mask); > >> > >>> + } else { > >>> + ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, > >>> wr_mask); > >> > >> return rmw_cd_mhpmeventh(env, ctr_index, val, new_val, > >> wr_mask); > >> > >>> + } > >>> + break; > >>> + default: > >>> + goto done; > >> > >> return -EINVAL; > >> > >>> + } > >>> + > >>> +done: > >>> + return ret; > >> > >> And remove this last 'return' since we're doing all possible returns > >> already. > >> > > > > Personally, I prefer a single return in a switch case block. That's > > why I have the jump label. > > If you feel too strongly about that, I can change as per your suggestion > > though. > > > Yeah I forgot to mention in my reply that this was more a code style > suggestion than > "please change it". Feel free to keep it as is. > > If you want consistency with the label + return pattern throughout the > function you could > remove the instances of 'return RISCV_EXCP_ILLEGAL_INST' and do > > ret = RISCV_EXCP_ILLEGAL_INST; > goto done; > > That way we don't have early 'return' exits in some places and 'goto done' in > others. >
Sounds good. Fixed. > And again, optional code style comments. Thanks, > > > Daniel > > > > > >> > >> Thanks, > >> > >> Daniel > >> > >>> } > >>> > >>> /* > >>> @@ -2578,6 +2833,21 @@ static RISCVException > >>> write_mcountinhibit(CPURISCVState *env, int csrno, > >>> return RISCV_EXCP_NONE; > >>> } > >>> > >>> +static RISCVException read_scountinhibit(CPURISCVState *env, int csrno, > >>> + target_ulong *val) > >>> +{ > >>> + /* S-mode can only access the bits delegated by M-mode */ > >>> + *val = env->mcountinhibit & env->mcounteren; > >>> + return RISCV_EXCP_NONE; > >>> +} > >>> + > >>> +static RISCVException write_scountinhibit(CPURISCVState *env, int csrno, > >>> + target_ulong val) > >>> +{ > >>> + write_mcountinhibit(env, csrno, val & env->mcounteren); > >>> + return RISCV_EXCP_NONE; > >>> +} > >>> + > >>> static RISCVException read_mcounteren(CPURISCVState *env, int csrno, > >>> target_ulong *val) > >>> { > >>> @@ -2680,11 +2950,13 @@ static RISCVException write_menvcfg(CPURISCVState > >>> *env, int csrno, > >>> target_ulong val) > >>> { > >>> const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); > >>> - uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | > >>> MENVCFG_CBZE; > >>> + uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | > >>> + MENVCFG_CBZE | MENVCFG_CDE; > >>> > >>> if (riscv_cpu_mxl(env) == MXL_RV64) { > >>> mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | > >>> (cfg->ext_sstc ? MENVCFG_STCE : 0) | > >>> + (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) | > >>> (cfg->ext_svadu ? MENVCFG_ADUE : 0); > >>> > >>> if (env_archcpu(env)->cfg.ext_zicfilp) { > >>> @@ -2713,7 +2985,8 @@ static RISCVException write_menvcfgh(CPURISCVState > >>> *env, int csrno, > >>> const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); > >>> uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | > >>> (cfg->ext_sstc ? MENVCFG_STCE : 0) | > >>> - (cfg->ext_svadu ? MENVCFG_ADUE : 0); > >>> + (cfg->ext_svadu ? MENVCFG_ADUE : 0) | > >>> + (cfg->ext_smcdeleg ? 
MENVCFG_CDE : 0); > >>> uint64_t valh = (uint64_t)val << 32; > >>> > >>> env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); > >>> @@ -5498,6 +5771,11 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { > >>> write_sstateen_1_3, > >>> .min_priv_ver = PRIV_VERSION_1_12_0 }, > >>> > >>> + /* Supervisor Counter Delegation */ > >>> + [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred, > >>> + read_scountinhibit, write_scountinhibit, > >>> + .min_priv_ver = PRIV_VERSION_1_12_0 }, > >>> + > >>> /* Supervisor Trap Setup */ > >>> [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, > >>> write_sstatus, > >>> NULL, read_sstatus_i128 > >>> }, > >>> > >> >