Make sure the code never touches unsupported event counters: change
kvm_pmu_handle_pmcr() to only reset supported counters and
kvm_pmu_vcpu_reset() to only stop supported counters.

Other actions are already filtered against the supported counters in
kvm/sys_regs.c.
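
For context: kvm_pmu_valid_counter_mask() returns a bitmap with one bit
per implemented event counter (as advertised by PMCR_EL0.N) plus the
cycle counter, so the for_each_set_bit() loops in the diff below never
touch unimplemented counters. A minimal user-space sketch of that idea,
where valid_counter_mask() is a simplified stand-in for the kernel
helper and CYCLE_IDX mirrors ARMV8_PMU_CYCLE_IDX:

#include <stdio.h>
#include <stdint.h>

#define CYCLE_IDX 31	/* stand-in for ARMV8_PMU_CYCLE_IDX */

/* Simplified stand-in for kvm_pmu_valid_counter_mask(): one bit per
 * implemented event counter plus the fixed cycle counter. */
static uint32_t valid_counter_mask(unsigned int n)
{
	uint32_t mask = (n == 0) ? 0 : ((1u << n) - 1);

	return mask | (1u << CYCLE_IDX);
}

int main(void)
{
	uint32_t mask = valid_counter_mask(6);	/* e.g. PMCR_EL0.N == 6 */
	unsigned int i;

	/* Open-coded equivalent of for_each_set_bit(i, &mask, 32):
	 * only counters present in the mask get reset. */
	for (i = 0; i < 32; i++)
		if (mask & (1u << i))
			printf("reset counter %u\n", i);

	return 0;
}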

Signed-off-by: Eric Auger <[email protected]>
---
 virt/kvm/arm/pmu.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 560db6282137..f0d0312c0a55 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -247,10 +247,11 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
  */
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
 {
-       int i;
+       unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       int i;
 
-       for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+       for_each_set_bit(i, &mask, 32)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
 
        bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
@@ -527,10 +528,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
  */
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
-       u64 mask;
+       unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        int i;
 
-       mask = kvm_pmu_valid_counter_mask(vcpu);
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
@@ -542,7 +542,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
        if (val & ARMV8_PMU_PMCR_P) {
-               for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
+               for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
 }
-- 
2.20.1
