Moving the mtmsrd after the host SPRs are saved and before the guest
SPRs start to be loaded can prevent an SPR scoreboard stall (because
the mtmsrd is L=1 type, which does not cause context synchronisation).

It would also now be more convenient to combine this with the mtmsrd
L=0 instruction that enables facilities just below, but that is not
done yet.
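
For illustration, the resulting entry ordering looks roughly like this
(a sketch only; load_guest_sprs() is a hypothetical stand-in for the
guest SPR loads, not a helper in this file):

	save_p9_host_os_sprs(&host_os_sprs);	/* host mfspr stream */

	/*
	 * hard_irq_disable() is the mtmsrd L=1 (EE/RI only). It is not
	 * context synchronising, so the guest mtspr stream below need
	 * not wait on the SPR scoreboard.
	 */
	hard_irq_disable();
	if (lazy_irq_pending())
		return 0;

	load_guest_sprs(vcpu);			/* guest mtspr stream */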

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 541a023e25dd..6b0689589e13 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4169,6 +4169,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
        save_p9_host_os_sprs(&host_os_sprs);
 
+       /*
+        * This could be combined with MSR[RI] clearing, but that expands
+        * the unrecoverable window. It would be better to cover the
+        * unrecoverable cases with KVM bad interrupt handling than to
+        * use MSR[RI] at all.
+        *
+        * Combining this with the IR/DR disable is much more difficult
+        * and less worthwhile.
+        */
+       hard_irq_disable();
+       if (lazy_irq_pending())
+               return 0;
+
        /* MSR bits may have been cleared by context switch */
        msr = 0;
        if (IS_ENABLED(CONFIG_PPC_FPU))
@@ -4680,6 +4692,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        struct kvmppc_vcore *vc;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_nested_guest *nested = vcpu->arch.nested;
+       unsigned long flags;
 
        trace_kvmppc_run_vcpu_enter(vcpu);
 
@@ -4723,11 +4736,11 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        if (kvm_is_radix(kvm))
                kvmppc_prepare_radix_vcpu(vcpu, pcpu);
 
-       local_irq_disable();
-       hard_irq_disable();
+       /* flags save not required, but irq_pmu has no disable/enable API */
+       powerpc_local_irq_pmu_save(flags);
        if (signal_pending(current))
                goto sigpend;
-       if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
+       if (need_resched() || !kvm->arch.mmu_ready)
                goto out;
 
        if (!nested) {
@@ -4795,7 +4808,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        }
        vtime_account_guest_exit();
 
-       local_irq_enable();
+       powerpc_local_irq_pmu_restore(flags);
 
        cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
 
@@ -4853,7 +4866,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        run->exit_reason = KVM_EXIT_INTR;
        vcpu->arch.ret = -EINTR;
  out:
-       local_irq_enable();
+       powerpc_local_irq_pmu_restore(flags);
        preempt_enable();
        goto done;
 }
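
For context, the "enables facilities just below" in the changelog
refers to the msr accumulation visible at the top of the first hunk.
A minimal sketch of its shape (an assumption based on the usual
msr_check_and_set() pattern; the real function may handle further
bits such as MSR_TM):

	msr = 0;
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr |= MSR_VSX;
	msr = msr_check_and_set(msr);	/* one mtmsrd L=0 if any bit is clear */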
-- 
2.23.0
