Commit-ID:  cecf62352aee2b4fe114aafd1b8c5f265a4243ce
Gitweb:     http://git.kernel.org/tip/cecf62352aee2b4fe114aafd1b8c5f265a4243ce
Author:     Alexander Shishkin <alexander.shish...@linux.intel.com>
AuthorDate: Thu, 15 Sep 2016 11:22:33 +0300
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Thu, 15 Sep 2016 11:25:26 +0200

perf/x86/intel: Don't disable "intel_bts" around "intel" event batching

At the moment, intel_bts events get disabled from the intel PMU's disable
callback, which is also invoked around that PMU's event scheduling
transactions, even though those transactions have nothing to do with
intel_bts events.

We do want to keep intel_bts events off inside the PMI handler to
avoid filling up their buffer too soon.

This patch moves intel_bts enabling/disabling directly into the PMI
handler.
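
With this in place, the PMI handler's flow is roughly as follows (a
simplified sketch, following the hunks below):

  intel_pmu_handle_irq()
    intel_bts_disable_local()
    __intel_pmu_disable_all()
    ... drain BTS buffer, handle PEBS/counter overflows ...
    __intel_pmu_enable_all(0, true)    /* only if cpuc->enabled */
    intel_bts_enable_local()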

Reported-by: Vince Weaver <vincent.wea...@maine.edu>
Signed-off-by: Alexander Shishkin <alexander.shish...@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <a...@infradead.org>
Cc: Arnaldo Carvalho de Melo <a...@redhat.com>
Cc: Jiri Olsa <jo...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijls...@chello.nl>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Stephane Eranian <eran...@google.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: vi...@deater.net
Link: http://lkml.kernel.org/r/20160915082233.11065-1-alexander.shish...@linux.intel.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/events/intel/core.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2cbde2f..4c9a79b 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
  * disabled state if called consecutively.
  *
  * During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
  */
 static void __intel_pmu_disable_all(void)
 {
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
 
        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
-       else
-               intel_bts_disable_local();
 
        intel_pmu_pebs_disable_all();
 }
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
                        return;
 
                intel_pmu_enable_bts(event->hw.config);
-       } else
-               intel_bts_enable_local();
+       }
 }
 
 static void intel_pmu_enable_all(int added)
@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         */
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
+       intel_bts_disable_local();
        __intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ done:
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
        if (cpuc->enabled)
                __intel_pmu_enable_all(0, true);
+       intel_bts_enable_local();
 
        /*
         * Only unmask the NMI after the overflow counters
