KVM PMU management code looks for particular frozen/disabled bits in
the PMU registers to know whether it must clear them when coming
out of a guest or not. Initialising the host PMU registers with these
bits set at boot helps KVM make those optimisations without getting
confused. Longer term, the better approach might be to move guest/host
PMU switching to the perf subsystem.
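
As a rough illustration only (a sketch, not the actual KVM entry/exit
code; the function name below is made up), the kind of short-circuit
this enables on the switch-out path looks like:

	/*
	 * Sketch: if MMCR0 still has the freeze bit set from boot, the
	 * PMU was never unfrozen on this CPU, so the path switching away
	 * from the host PMU can skip touching the remaining PMU SPRs.
	 */
	static void sketch_switch_out_host_pmu(void)
	{
		unsigned long mmcr0 = mfspr(SPRN_MMCR0);

		if (mmcr0 & MMCR0_FC)
			return;	/* already frozen/disabled, nothing to do */

		/* otherwise freeze counters before loading guest PMU state */
		mtspr(SPRN_MMCR0, mmcr0 | MMCR0_FC);
	}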

Cc: Athira Rajeev <atraj...@linux.vnet.ibm.com>
Cc: Madhavan Srinivasan <ma...@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kernel/cpu_setup_power.c | 4 ++--
 arch/powerpc/kernel/dt_cpu_ftrs.c     | 6 +++---
 arch/powerpc/kvm/book3s_hv.c          | 5 +++++
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kernel/cpu_setup_power.c b/arch/powerpc/kernel/cpu_setup_power.c
index a29dc8326622..3dc61e203f37 100644
--- a/arch/powerpc/kernel/cpu_setup_power.c
+++ b/arch/powerpc/kernel/cpu_setup_power.c
@@ -109,7 +109,7 @@ static void init_PMU_HV_ISA207(void)
 static void init_PMU(void)
 {
        mtspr(SPRN_MMCRA, 0);
-       mtspr(SPRN_MMCR0, 0);
+       mtspr(SPRN_MMCR0, MMCR0_FC);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
 }
@@ -123,7 +123,7 @@ static void init_PMU_ISA31(void)
 {
        mtspr(SPRN_MMCR3, 0);
        mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
-       mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
+       mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
 }
 
 /*
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 38ea20fadc4a..a6bb0ee179cd 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -353,7 +353,7 @@ static void init_pmu_power8(void)
        }
 
        mtspr(SPRN_MMCRA, 0);
-       mtspr(SPRN_MMCR0, 0);
+       mtspr(SPRN_MMCR0, MMCR0_FC);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
        mtspr(SPRN_MMCRS, 0);
@@ -392,7 +392,7 @@ static void init_pmu_power9(void)
                mtspr(SPRN_MMCRC, 0);
 
        mtspr(SPRN_MMCRA, 0);
-       mtspr(SPRN_MMCR0, 0);
+       mtspr(SPRN_MMCR0, MMCR0_FC);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
 }
@@ -428,7 +428,7 @@ static void init_pmu_power10(void)
 
        mtspr(SPRN_MMCR3, 0);
        mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
-       mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
+       mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
 }
 
 static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 197665c1a1cd..e6cb923f91ff 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2718,6 +2718,11 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 #endif
 #endif
        vcpu->arch.mmcr[0] = MMCR0_FC;
+       if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+               vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
+               vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+       }
+
        vcpu->arch.ctrl = CTRL_RUNLATCH;
        /* default to host PVR, since we can't spoof it */
        kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
-- 
2.23.0
