Reorder SPR switching on the guest entry and exit paths so the two
sides are symmetric: the PMU is switched to the guest last before
entry and back to the host first after exit. This makes the next
refactoring patch possible. No functional change.
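
A sketch of the resulting kvmhv_p9_guest_entry() ordering, condensed
from the hunks below (the ALTIVEC and pseries paths are elided; the
helper names are the ones already used in book3s_hv.c):

	/* entry: PMU is the last context switched over to the guest */
	load_spr_state(vcpu, &host_os_sprs);
	load_fp_state(&vcpu->arch.fp);
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
	switch_pmu_to_guest(vcpu, &host_os_sprs);

	/* ... guest runs ... */

	/* exit: PMU is the first context switched back to the host */
	switch_pmu_to_host(vcpu, &host_os_sprs);
	store_spr_state(vcpu);
	store_fp_state(&vcpu->arch.fp);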

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 5c44c4ff5d46..53fe41102c22 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4222,7 +4222,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                msr = mfmsr(); /* TM restore can update msr */
        }
 
-       switch_pmu_to_guest(vcpu, &host_os_sprs);
+       load_spr_state(vcpu, &host_os_sprs);
 
        load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
@@ -4230,7 +4230,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #endif
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
-       load_spr_state(vcpu, &host_os_sprs);
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
 
        if (kvmhv_on_pseries()) {
                /*
@@ -4330,6 +4330,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                        vcpu->arch.slb_max = 0;
        }
 
+       switch_pmu_to_host(vcpu, &host_os_sprs);
+
        store_spr_state(vcpu);
 
        store_fp_state(&vcpu->arch.fp);
@@ -4344,8 +4346,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
        vcpu_vpa_increment_dispatch(vcpu);
 
-       switch_pmu_to_host(vcpu, &host_os_sprs);
-
        timer_rearm_host_dec(*tb);
 
        restore_p9_host_os_sprs(vcpu, &host_os_sprs);
-- 
2.23.0
