The current PMU emulation sometimes narrows counters to 32 bits
if the counter isn't the cycle counter. As this is going to
change with PMUv3p5, where all counters are 64 bits, fix
the couple of cases where this narrowing happens unconditionally.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/kvm/pmu-emul.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 9e6bc7edc4de..1fab889dbc74 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -151,20 +151,17 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-       u64 counter, reg, val;
+       u64 reg, val;
 
        if (!pmc->perf_event)
                return;
 
-       counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
+       val = kvm_pmu_get_counter_value(vcpu, pmc->idx);
 
-       if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+       if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                reg = PMCCNTR_EL0;
-               val = counter;
-       } else {
+       else
                reg = PMEVCNTR0_EL0 + pmc->idx;
-               val = lower_32_bits(counter);
-       }
 
        __vcpu_sys_reg(vcpu, reg) = val;
 
@@ -414,7 +411,8 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 
                /* Increment this counter */
                reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
-               reg = lower_32_bits(reg);
+               if (!kvm_pmu_idx_is_64bit(vcpu, i))
+                       reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
 
                /* No overflow? move on */
-- 
2.34.1

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to