From: Nicholas Piggin <npig...@gmail.com>

[ Upstream commit 4782e0cd0d184d727ad3b0cfe20d1d44d9f98239 ]

The softpatch interrupt sets HSRR0 to the faulting instruction +4, so
NIP should be rewound by 4 to get the faulting instruction address in
the case it is a TM softpatch interrupt (the instruction was not
executed) and the instruction was not emulated.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
Signed-off-by: Michael Ellerman <m...@ellerman.id.au>
Link: https://lore.kernel.org/r/20210811160134.904987-4-npig...@gmail.com
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
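[ Not part of the patch: a minimal, standalone sketch of the NIP
  bookkeeping the change implements. The struct fields, RESUME_GUEST
  value, and tm_instruction_emulated() helper below are illustrative
  stand-ins modelled on the diff, not the real KVM definitions. ]

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the KVM structures/constants used in the diff below. */
#define RESUME_GUEST 1

struct regs { uint64_t nip; };
struct vcpu { struct regs regs; };

/*
 * Hypothetical placeholder for the switch in kvmhv_p9_tm_emulation():
 * returns true when the faulting TM instruction is emulated.
 */
static bool tm_instruction_emulated(struct vcpu *vcpu)
{
        (void)vcpu;
        return false;           /* model the "not emulated" path */
}

static int softpatch_sketch(struct vcpu *vcpu)
{
        /* The softpatch interrupt delivers NIP = faulting instruction + 4. */
        vcpu->regs.nip -= 4;    /* rewind to the faulting instruction */

        if (tm_instruction_emulated(vcpu)) {
                /* Emulated: step past the instruction again. */
                vcpu->regs.nip += 4;
                return RESUME_GUEST;
        }

        /*
         * Not emulated: NIP is left pointing at the faulting instruction,
         * as a normal synchronous interrupt would, so an interrupt injected
         * into the guest reports the correct address.
         */
        return RESUME_GUEST;
}

int main(void)
{
        struct vcpu vcpu = { .regs = { .nip = 0x1004 } };  /* faulting insn at 0x1000 */

        softpatch_sketch(&vcpu);
        printf("guest sees nip = %#llx\n", (unsigned long long)vcpu.regs.nip);
        return 0;
}
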
 arch/powerpc/kvm/book3s_hv_tm.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index cc90b8b82329..e7c36f8bf205 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -46,6 +46,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        u64 newmsr, bescr;
        int ra, rs;
 
+       /*
+        * The TM softpatch interrupt sets NIP to the instruction following
+        * the faulting instruction, which is not executed. Rewind nip to the
+        * faulting instruction so it looks like a normal synchronous
+        * interrupt, then update nip in the places where the instruction is
+        * emulated.
+        */
+       vcpu->arch.regs.nip -= 4;
+
        /*
         * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
         * in these instructions, so masking bit 31 out doesn't change these
@@ -67,7 +76,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                               (newmsr & MSR_TM)));
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
-               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.cfar = vcpu->arch.regs.nip;
                vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
                return RESUME_GUEST;
 
@@ -100,7 +109,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.bescr = bescr;
                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                vcpu->arch.shregs.msr = msr;
-               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.cfar = vcpu->arch.regs.nip;
                vcpu->arch.regs.nip = vcpu->arch.ebbrr;
                return RESUME_GUEST;
 
@@ -116,6 +125,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
 
        /* ignore bit 31, see comment above */
@@ -152,6 +162,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                                msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
                }
                vcpu->arch.shregs.msr = msr;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
 
        /* ignore bit 31, see comment above */
@@ -189,6 +200,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
 
        /* ignore bit 31, see comment above */
@@ -220,6 +232,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr = msr | MSR_TS_S;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
        }
 
-- 
2.30.2
