Add support for the :G and :H event modifiers in perf by handling the
exclude_host/exclude_guest event attributes.
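
For reference, the perf tool's :G and :H modifiers map onto these
attribute bits roughly as follows (an illustrative sketch, not part of
this patch; field names from include/uapi/linux/perf_event.h):

	struct perf_event_attr attr = { };

	/* cycles:G - count only while the guest is running */
	attr.exclude_host  = 1;

	/* cycles:H - count only while the host is running */
	attr.exclude_guest = 1;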

We notify KVM of counters that we wish to be enabled or disabled on
guest entry/exit and thus refrain from starting or stopping :G events
ourselves, as per the event's exclude_host attribute.
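
The KVM interface used for this is introduced elsewhere in this
series; a minimal sketch of the assumed declarations and semantics
(the prototypes match the callers below, the comments describe the
intent rather than the exact KVM implementation):

	/* clear 'clr' and set 'set' in the per-cpu counter bitmaps */
	void kvm_set_clr_guest_pmu_events(u32 clr, u32 set);
	void kvm_set_clr_host_pmu_events(u32 clr, u32 set);

	/*
	 * guest bits: counters KVM enables at guest entry and disables
	 * at guest exit (:G events).
	 * host bits: counters KVM disables at guest entry and
	 * re-enables at guest exit (:H events).
	 */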

When using VHE, the host kernel runs at EL2 and EL2 is unused by the
guest - therefore we can use the PMU's exception-level filtering to
exclude EL2 for events with the 'exclude_host' attribute set.

With both VHE and non-VHE we switch the counters between host and
guest at EL2. With non-VHE, when using 'exclude_host', we also filter
out EL2.
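
A rough sketch of what that switch is expected to look like on the KVM
side at guest entry (not part of this patch; the function name and
parameters are purely illustrative, with the reverse performed at
guest exit):

	static void __pmu_switch_to_guest(u32 events_host, u32 events_guest)
	{
		/* stop counters that should only count in the host (:H) */
		write_sysreg(events_host, pmcntenclr_el0);
		/* start counters that should only count in the guest (:G) */
		write_sysreg(events_guest, pmcntenset_el0);
	}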

These changes eliminate counters counting host events on the
boundaries of guest entry/exit when using :G. However, when using :H
on non-VHE, unless exclude_hv is also set, there is a small blackout
window at guest entry/exit where host events are not captured.

Signed-off-by: Andrew Murray <[email protected]>
---
 arch/arm64/kernel/perf_event.c | 41 +++++++++++++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index de564ae..412bd80 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -26,6 +26,7 @@
 
 #include <linux/acpi.h>
 #include <linux/clocksource.h>
+#include <linux/kvm_host.h>
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
@@ -647,11 +648,23 @@ static inline int armv8pmu_enable_counter(int idx)
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
+       struct perf_event_attr *attr = &event->attr;
        int idx = event->hw.idx;
+       u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
-       armv8pmu_enable_counter(idx);
        if (armv8pmu_event_is_chained(event))
-               armv8pmu_enable_counter(idx - 1);
+               counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+       if (attr->exclude_host)
+               kvm_set_clr_guest_pmu_events(0, counter_bits);
+       if (attr->exclude_guest)
+               kvm_set_clr_host_pmu_events(0, counter_bits);
+
+       if (!attr->exclude_host) {
+               armv8pmu_enable_counter(idx);
+               if (armv8pmu_event_is_chained(event))
+                       armv8pmu_enable_counter(idx - 1);
+       }
 }
 
 static inline int armv8pmu_disable_counter(int idx)
@@ -664,11 +677,23 @@ static inline int armv8pmu_disable_counter(int idx)
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
+       struct perf_event_attr *attr = &event->attr;
        int idx = hwc->idx;
+       u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
        if (armv8pmu_event_is_chained(event))
-               armv8pmu_disable_counter(idx - 1);
-       armv8pmu_disable_counter(idx);
+               counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+       if (attr->exclude_host)
+               kvm_set_clr_guest_pmu_events(counter_bits, 0);
+       if (attr->exclude_guest)
+               kvm_set_clr_host_pmu_events(counter_bits, 0);
+
+       if (!attr->exclude_host) {
+               if (armv8pmu_event_is_chained(event))
+                       armv8pmu_disable_counter(idx - 1);
+               armv8pmu_disable_counter(idx);
+       }
 }
 
 static inline int armv8pmu_enable_intens(int idx)
@@ -945,12 +970,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
         * with other architectures (x86 and Power).
         */
        if (is_kernel_in_hyp_mode()) {
-               if (!attr->exclude_kernel)
+               if (!attr->exclude_kernel && !attr->exclude_host)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
        } else {
                if (attr->exclude_kernel)
                        config_base |= ARMV8_PMU_EXCLUDE_EL1;
-               if (!attr->exclude_hv)
+               if (!attr->exclude_hv && !attr->exclude_host)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
        }
        if (attr->exclude_user)
@@ -976,6 +1001,10 @@ static void armv8pmu_reset(void *info)
                armv8pmu_disable_intens(idx);
        }
 
+       /* Clear the counters we flip at guest entry/exit */
+       kvm_set_clr_host_pmu_events(U32_MAX, 0);
+       kvm_set_clr_guest_pmu_events(U32_MAX, 0);
+
        /*
         * Initialize & Reset PMNC. Request overflow interrupt for
         * 64 bit cycle counter but cheat in armv8pmu_write_counter().
-- 
2.7.4
