In order to ensure register permission checks will have consistent results whether or not the PMU is partitioned, define some access helpers for PMUSERENR and PMSELR that always return the canonical value for those registers, whether it lives in a physical or virtual register.
Signed-off-by: Colton Lewis <[email protected]> --- arch/arm64/kvm/pmu.c | 16 ++++++++++++++++ arch/arm64/kvm/sys_regs.c | 6 +++--- include/kvm/arm_pmu.h | 12 ++++++++++++ 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 74a5d35edb244..344ed9d8329a6 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -885,3 +885,19 @@ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) return u64_replace_bits(pmcr, n, ARMV8_PMU_PMCR_N); } + +u64 kvm_vcpu_read_pmselr(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_pmu_is_partitioned(vcpu)) + return read_sysreg(pmselr_el0); + else + return __vcpu_sys_reg(vcpu, PMSELR_EL0); +} + +u64 kvm_vcpu_read_pmuserenr(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_pmu_is_partitioned(vcpu)) + return read_sysreg(pmuserenr_el0); + else + return __vcpu_sys_reg(vcpu, PMUSERENR_EL0); +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index a460e93b1ad0a..9e893859a41c9 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -987,7 +987,7 @@ static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) { - u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0); + u64 reg = kvm_vcpu_read_pmuserenr(vcpu); bool enabled = (reg & flags) || vcpu_mode_priv(vcpu); if (!enabled) @@ -1141,7 +1141,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, return false; idx = SYS_FIELD_GET(PMSELR_EL0, SEL, - __vcpu_sys_reg(vcpu, PMSELR_EL0)); + kvm_vcpu_read_pmselr(vcpu)); } else if (r->Op2 == 0) { /* PMCCNTR_EL0 */ if (pmu_access_cycle_counter_el0_disabled(vcpu)) @@ -1191,7 +1191,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { /* PMXEVTYPER_EL0 */ - idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0)); + idx = SYS_FIELD_GET(PMSELR_EL0, SEL, kvm_vcpu_read_pmselr(vcpu)); reg = PMEVTYPER0_EL0 + 
idx; } else if (r->CRn == 14 && (r->CRm & 12) == 12) { idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 50983cdbec045..f21439000129b 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -130,6 +130,8 @@ int kvm_arm_set_default_pmu(struct kvm *kvm); u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm); u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); +u64 kvm_vcpu_read_pmselr(struct kvm_vcpu *vcpu); +u64 kvm_vcpu_read_pmuserenr(struct kvm_vcpu *vcpu); bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx); void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu); #else @@ -250,6 +252,16 @@ static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) return 0; } +static inline u64 kvm_vcpu_read_pmselr(struct kvm_vcpu *vcpu) +{ + return 0; +} + +static inline u64 kvm_vcpu_read_pmuserenr(struct kvm_vcpu *vcpu) +{ + return 0; +} + static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx) { return false; -- 2.53.0.rc2.204.g2597b5adb4-goog
