This moves the PMU switch to the guest as late as possible on entry,
and the switch back to the host as early as possible on exit. This
gives the host as much perf coverage of the KVM entry/exit code as
possible.
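
For illustration, the resulting ordering on the non-nested path in
kvmhv_vcpu_entry_p9() is, condensed from the diff below (the elided
lines stand for the existing SPR, timing, and MMU switching code,
which now runs with the host PMU still active):

    save_p9_host_os_sprs(&host_os_sprs);
    /* ... host state save and guest state load, now perf-visible ... */
    switch_pmu_to_guest(vcpu, &host_os_sprs);  /* as late as possible */
    kvmppc_p9_enter_guest(vcpu);
    switch_pmu_to_host(vcpu, &host_os_sprs);   /* as early as possible */
    /* ... guest state save and host state restore, now perf-visible ... */
    end_timing(vcpu);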

This is slightly suboptimal from an SPR scheduling point of view when
the PMU is enabled, because the PMU register switch is no longer
grouped with the rest of the SPR switching, but when perf is disabled
there is no real difference.
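
The nested-guest path gets the same bracketing, with the PMU switched
as tightly as possible around the H_ENTER_NESTED hcall (again
condensed from the diff below):

    switch_pmu_to_guest(vcpu, &host_os_sprs);
    trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
                              __pa(&vcpu->arch.regs));
    kvmhv_restore_hv_return_state(vcpu, &hvregs);
    switch_pmu_to_host(vcpu, &host_os_sprs);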

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c          | 6 ++----
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 6 ++----
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index db42eeb27c15..5a1859311b3e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3820,8 +3820,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
        s64 dec;
        int trap;
 
-       switch_pmu_to_guest(vcpu, &host_os_sprs);
-
        save_p9_host_os_sprs(&host_os_sprs);
 
        /*
@@ -3884,9 +3882,11 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 
        mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
        mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
        trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
                                  __pa(&vcpu->arch.regs));
        kvmhv_restore_hv_return_state(vcpu, &hvregs);
+       switch_pmu_to_host(vcpu, &host_os_sprs);
        vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
        vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
        vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
@@ -3905,8 +3905,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 
        restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-       switch_pmu_to_host(vcpu, &host_os_sprs);
-
        return trap;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 6bef509bccb8..619bbcd47b92 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -601,8 +601,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
        local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
        local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
 
-       switch_pmu_to_guest(vcpu, &host_os_sprs);
-
        save_p9_host_os_sprs(&host_os_sprs);
 
        /*
@@ -744,7 +742,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
        accumulate_time(vcpu, &vcpu->arch.guest_time);
 
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
        kvmppc_p9_enter_guest(vcpu);
+       switch_pmu_to_host(vcpu, &host_os_sprs);
 
        accumulate_time(vcpu, &vcpu->arch.rm_intr);
 
@@ -955,8 +955,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
                asm volatile(PPC_CP_ABORT);
 
 out:
-       switch_pmu_to_host(vcpu, &host_os_sprs);
-
        end_timing(vcpu);
 
        return trap;
-- 
2.23.0
