The KVM API for event filtering says that counters do not count when
blocked by the event filter. To enforce that, the event filter must be
rechecked on every load since it might have changed since the last
time the guest wrote a value. If the event is filtered, exclude
counting at all exception levels before writing the value to hardware.

Signed-off-by: Colton Lewis <[email protected]>
---
 arch/arm64/kvm/pmu-direct.c | 48 +++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/arch/arm64/kvm/pmu-direct.c b/arch/arm64/kvm/pmu-direct.c
index b07b521543478..4bcacc55c507f 100644
--- a/arch/arm64/kvm/pmu-direct.c
+++ b/arch/arm64/kvm/pmu-direct.c
@@ -165,6 +165,53 @@ u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu)
        return *host_data_ptr(nr_event_counters);
 }
 
+/**
+ * kvm_pmu_apply_event_filter() - Enforce the KVM PMU event filter on loaded counters
+ * @vcpu: Pointer to vcpu struct
+ *
+ * To uphold the guarantee of the KVM PMU event filter, we must ensure
+ * no counter counts if its event is filtered. Accomplish this by
+ * setting the exclude bits for every exception level in that case.
+ */
+static void kvm_pmu_apply_event_filter(struct kvm_vcpu *vcpu)
+{
+       struct arm_pmu *pmu = vcpu->kvm->arch.arm_pmu;
+       unsigned long guest_counters = kvm_pmu_guest_counter_mask(pmu);
+       u64 evtyper_set = ARMV8_PMU_EXCLUDE_EL0 |
+               ARMV8_PMU_EXCLUDE_EL1;
+       u64 evtyper_clr = ARMV8_PMU_INCLUDE_EL2;
+       bool guest_include_el2;
+       u8 i;
+       u64 val;
+       u64 evsel;
+
+       if (!pmu)
+               return;
+
+       for_each_set_bit(i, &guest_counters, ARMPMU_MAX_HWEVENTS) {
+               if (i == ARMV8_PMU_CYCLE_IDX) {
+                       val = __vcpu_sys_reg(vcpu, PMCCFILTR_EL0);
+                       evsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
+               } else {
+                       val = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+                       evsel = val & kvm_pmu_event_mask(vcpu->kvm);
+               }
+
+               guest_include_el2 = (val & ARMV8_PMU_INCLUDE_EL2);
+               val &= ~evtyper_clr;
+
+               if (unlikely(is_hyp_ctxt(vcpu)) && guest_include_el2)
+                       val &= ~ARMV8_PMU_EXCLUDE_EL1;
+
+               if (vcpu->kvm->arch.pmu_filter &&
+                   !test_bit(evsel, vcpu->kvm->arch.pmu_filter))
+                       val |= evtyper_set;
+
+               write_sysreg(i, pmselr_el0);
+               write_sysreg(val, pmxevtyper_el0);
+       }
+}
+
 /**
  * kvm_pmu_load() - Load untrapped PMU registers
  * @vcpu: Pointer to struct kvm_vcpu
@@ -192,6 +239,7 @@ void kvm_pmu_load(struct kvm_vcpu *vcpu)
 
        pmu = vcpu->kvm->arch.arm_pmu;
        guest_counters = kvm_pmu_guest_counter_mask(pmu);
+       kvm_pmu_apply_event_filter(vcpu);
 
        for_each_set_bit(i, &guest_counters, ARMPMU_MAX_HWEVENTS) {
                val = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i);
-- 
2.53.0.rc2.204.g2597b5adb4-goog


Reply via email to