Re: [PATCH v4 29/46] KVM: PPC: Book3S HV P9: Implement the rest of the P9 path in C

2021-04-02 Thread Nicholas Piggin
Excerpts from Alexey Kardashevskiy's message of April 2, 2021 2:36 pm:
> 
> 
> On 01/04/2021 21:35, Nicholas Piggin wrote:
>> Excerpts from Alexey Kardashevskiy's message of April 1, 2021 3:30 pm:
>>>
>>>
>>> On 3/23/21 12:02 PM, Nicholas Piggin wrote:
 Almost all logic is moved to C, by introducing a new in_guest mode that
 selects and branches very early in the interrupt handler to the P9 exit
 code.
>> 
>> [...]
>> 
 +/*
 + * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
 + * above if the interrupt was taken for a guest that was entered via
 + * kvmppc_p9_enter_guest().
 + *
 + * This code recovers the host stack and vcpu pointer, saves all GPRs and
 + * CR, LR, CTR, XER as well as guest MSR and NIA into the VCPU, then re-
 + * establishes the host stack and registers to return from the
 + * kvmppc_p9_enter_guest() function.
>>>
>>> What does "this code" refer to? If it is the asm below, then it does not
>>> save CTR; that is done in the C code. Otherwise it is confusing (to me) :)
>> 
>> Yes you're right, CTR is saved in C.
>> 
 + */
 +.balign   IFETCH_ALIGN_BYTES
 +kvmppc_p9_exit_hcall:
 +  mfspr   r11,SPRN_SRR0
 +  mfspr   r12,SPRN_SRR1
 +  li  r10,0xc00
 +  std r10,HSTATE_SCRATCH0(r13)
 +
 +.balign   IFETCH_ALIGN_BYTES
 +kvmppc_p9_exit_interrupt:
>> 
>> [...]
>> 
 +static inline void slb_invalidate(unsigned int ih)
 +{
 +  asm volatile("slbia %0" :: "i"(ih));
 +}
>>>
>>> This one is not used.
>> 
It gets used in a later patch; I guess I should move it there.
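
For reference, the "i" constraint on the helper means the IH field must be
a compile-time constant, so a call site in that later patch would look
something like this (hypothetical caller; the IH value and its flush scope
are illustrative, not taken from the series):

	/* Hypothetical call site: flush SLB entries on a guest transition */
	static void example_guest_slb_flush(void)
	{
		slb_invalidate(6);	/* IH is encoded as an immediate */
	}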
>> 
>> [...]
>> 
 +int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu)
 +{
 +  u64 *exsave;
 +  unsigned long msr = mfmsr();
 +  int trap;
 +
 +  start_timing(vcpu, &vcpu->arch.rm_entry);
 +
 +  vcpu->arch.ceded = 0;
 +
 +  WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
 +  WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
 +
 +  mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
 +  mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
 +
 +  /*
 +   * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
 +   * Interrupt (HDSI) the HDSISR is not updated at all.
 +   *
 +   * To work around this we put a canary value into the HDSISR before
 +   * returning to a guest and then check for this canary when we take a
 +   * HDSI. If we find the canary on a HDSI, we know the hardware didn't
 +   * update the HDSISR. In this case we return to the guest to retake the
 +   * HDSI, which should correctly update the HDSISR on the second entry.
 +   *
 +   * Just do this on all P9 processors for now.
 +   */
 +  mtspr(SPRN_HDSISR, HDSISR_CANARY);
 +
 +  accumulate_time(vcpu, &vcpu->arch.guest_time);
 +
 +  local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST_HV_FAST;
 +  kvmppc_p9_enter_guest(vcpu);
 +  // Radix host and guest means host never runs with guest MMU state
 +  local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
 +
 +  accumulate_time(vcpu, &vcpu->arch.rm_intr);
 +
 +  /* Get these from r11/12 and paca exsave */
 +  vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
 +  vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
 +  vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
 +  vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
 +
 +  /* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
 +  trap = local_paca->kvm_hstate.scratch0 & ~0x2;
 +  if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
 +  exsave = local_paca->exgen;
 +  } else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
 +  exsave = local_paca->exnmi;
 +  } else { /* trap == 0x200 */
 +  exsave = local_paca->exmc;
 +  }
 +
 +  vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
 +  vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
 +  vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
 +  vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
 +  vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
 +  vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
 +  vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
 +  vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
 +  vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
 +  vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
 +
 +  vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
 +
 +  if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
 +  vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
 +  vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
 +  kvmppc_realmode_machine_check(vcpu);
 +
 +  } else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
 +  kvmppc_realmode_hmi_handler();
 +
 +  } else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {

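The detection side of the HDSISR canary workaround is not visible in the
excerpt above. A minimal sketch of the check taken on the next HDSI,
assuming the handler sees the fault_dsisr value captured by the exit path
(check_hdsi_canary() is an illustrative helper, not from the patch):

	static int check_hdsi_canary(struct kvm_vcpu *vcpu)
	{
		/* Canary survived: HW failed to update HDSISR on this HDSI */
		if (vcpu->arch.fault_dsisr == HDSISR_CANARY)
			return RESUME_GUEST;	/* go back and retake the HDSI */
		return RESUME_HOST;		/* normal HDSI handling */
	}
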
Re: [PATCH v4 29/46] KVM: PPC: Book3S HV P9: Implement the rest of the P9 path in C

2021-04-01 Thread Alexey Kardashevskiy

On 01/04/2021 21:35, Nicholas Piggin wrote:

Excerpts from Alexey Kardashevskiy's message of April 1, 2021 3:30 pm:

On 3/23/21 12:02 PM, Nicholas Piggin wrote:

Almost all logic is moved to C, by introducing a new in_guest mode that
selects and branches very early in the interrupt handler to the P9 exit
code.


[...]


+/*
+ * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
+ * above if the interrupt was taken for a guest that was entered via
+ * kvmppc_p9_enter_guest().
+ *
+ * This code recovers the host stack and vcpu pointer, saves all GPRs and
+ * CR, LR, CTR, XER as well as guest MSR and NIA into the VCPU, then re-
+ * establishes the host stack and registers to return from the
+ * kvmppc_p9_enter_guest() function.


What does "this code" refer to? If it is the asm below, then it does not
save CTR; that is done in the C code. Otherwise it is confusing (to me) :)


Yes you're right, CTR is saved in C.


+ */
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_hcall:
+   mfspr   r11,SPRN_SRR0
+   mfspr   r12,SPRN_SRR1
+   li  r10,0xc00
+   std r10,HSTATE_SCRATCH0(r13)
+
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_interrupt:


[...]


+static inline void slb_invalidate(unsigned int ih)
+{
+   asm volatile("slbia %0" :: "i"(ih));
+}


This one is not used.


It gets used in a later patch; I guess I should move it there.

[...]


+int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu)
+{
+   u64 *exsave;
+   unsigned long msr = mfmsr();
+   int trap;
+
+   start_timing(vcpu, &vcpu->arch.rm_entry);
+
+   vcpu->arch.ceded = 0;
+
+   WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
+   WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
+
+   mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+   mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
+
+   /*
+* On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
+* Interrupt (HDSI) the HDSISR is not updated at all.
+*
+* To work around this we put a canary value into the HDSISR before
+* returning to a guest and then check for this canary when we take a
+* HDSI. If we find the canary on a HDSI, we know the hardware didn't
+* update the HDSISR. In this case we return to the guest to retake the
+* HDSI, which should correctly update the HDSISR on the second entry.
+*
+* Just do this on all P9 processors for now.
+*/
+   mtspr(SPRN_HDSISR, HDSISR_CANARY);
+
+   accumulate_time(vcpu, &vcpu->arch.guest_time);
+
+   local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST_HV_FAST;
+   kvmppc_p9_enter_guest(vcpu);
+   // Radix host and guest means host never runs with guest MMU state
+   local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
+
+   accumulate_time(vcpu, &vcpu->arch.rm_intr);
+
+   /* Get these from r11/12 and paca exsave */
+   vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
+   vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
+   vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+   vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+
+   /* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
+   trap = local_paca->kvm_hstate.scratch0 & ~0x2;
+   if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+   exsave = local_paca->exgen;
+   } else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
+   exsave = local_paca->exnmi;
+   } else { /* trap == 0x200 */
+   exsave = local_paca->exmc;
+   }
+
+   vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
+   vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
+   vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
+   vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
+   vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
+   vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
+   vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
+   vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
+   vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
+   vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
+
+   vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+
+   if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+   vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+   vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+   kvmppc_realmode_machine_check(vcpu);
+
+   } else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
+   kvmppc_realmode_hmi_handler();
+
+   } else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
+   vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+   } else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
+   vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+   vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+   vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+

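A note on the indexing in the exit code quoted above: the EX_* constants
are byte offsets into the paca exception save areas, while exsave is a
u64 pointer, hence the division by sizeof(u64). An equivalent helper
(hypothetical, for illustration only) would be:

	static inline u64 exsave_read(u64 *exsave, unsigned int byte_offset)
	{
		/* EX_* offsets are in bytes; exsave is a u64 view */
		return exsave[byte_offset / sizeof(u64)];
	}

so that, e.g., vcpu->arch.regs.gpr[9] = exsave_read(exsave, EX_R9);
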
Re: [PATCH v4 29/46] KVM: PPC: Book3S HV P9: Implement the rest of the P9 path in C

2021-04-01 Thread Nicholas Piggin
Excerpts from Alexey Kardashevskiy's message of April 1, 2021 3:30 pm:
> 
> 
> On 3/23/21 12:02 PM, Nicholas Piggin wrote:
>> Almost all logic is moved to C, by introducing a new in_guest mode that
>> selects and branches very early in the interrupt handler to the P9 exit
>> code.

[...]

>> +/*
>> + * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
>> + * above if the interrupt was taken for a guest that was entered via
>> + * kvmppc_p9_enter_guest().
>> + *
>> + * This code recovers the host stack and vcpu pointer, saves all GPRs and
>> + * CR, LR, CTR, XER as well as guest MSR and NIA into the VCPU, then re-
>> + * establishes the host stack and registers to return from the
>> + * kvmppc_p9_enter_guest() function.
> 
> What does "this code" refer to? If it is the asm below, then it does not 
> save CTR; that is done in the C code. Otherwise it is confusing (to me) :)

Yes you're right, CTR is saved in C.

>> + */
>> +.balign IFETCH_ALIGN_BYTES
>> +kvmppc_p9_exit_hcall:
>> +mfspr   r11,SPRN_SRR0
>> +mfspr   r12,SPRN_SRR1
>> +li  r10,0xc00
>> +std r10,HSTATE_SCRATCH0(r13)
>> +
>> +.balign IFETCH_ALIGN_BYTES
>> +kvmppc_p9_exit_interrupt:

[...]

>> +static inline void slb_invalidate(unsigned int ih)
>> +{
>> +asm volatile("slbia %0" :: "i"(ih));
>> +}
> 
> This one is not used.

It gets used in a later patch; I guess I should move it there.

[...]

>> +int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu)
>> +{
>> +u64 *exsave;
>> +unsigned long msr = mfmsr();
>> +int trap;
>> +
>> +start_timing(vcpu, &vcpu->arch.rm_entry);
>> +
>> +vcpu->arch.ceded = 0;
>> +
>> +WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
>> +WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
>> +
>> +mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
>> +mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
>> +
>> +/*
>> + * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
>> + * Interrupt (HDSI) the HDSISR is not updated at all.
>> + *
>> + * To work around this we put a canary value into the HDSISR before
>> + * returning to a guest and then check for this canary when we take a
>> + * HDSI. If we find the canary on a HDSI, we know the hardware didn't
>> + * update the HDSISR. In this case we return to the guest to retake the
>> + * HDSI, which should correctly update the HDSISR on the second entry.
>> + *
>> + * Just do this on all P9 processors for now.
>> + */
>> +mtspr(SPRN_HDSISR, HDSISR_CANARY);
>> +
>> +accumulate_time(vcpu, &vcpu->arch.guest_time);
>> +
>> +local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST_HV_FAST;
>> +kvmppc_p9_enter_guest(vcpu);
>> +// Radix host and guest means host never runs with guest MMU state
>> +local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
>> +
>> +accumulate_time(vcpu, &vcpu->arch.rm_intr);
>> +
>> +/* Get these from r11/12 and paca exsave */
>> +vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
>> +vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
>> +vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
>> +vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
>> +
>> +/* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
>> +trap = local_paca->kvm_hstate.scratch0 & ~0x2;
>> +if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
>> +exsave = local_paca->exgen;
>> +} else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
>> +exsave = local_paca->exnmi;
>> +} else { /* trap == 0x200 */
>> +exsave = local_paca->exmc;
>> +}
>> +
>> +vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
>> +vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
>> +vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
>> +vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
>> +vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
>> +vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
>> +vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
>> +vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
>> +vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
>> +vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
>> +
>> +vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
>> +
>> +if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
>> +vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
>> +vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
>> +kvmppc_realmode_machine_check(vcpu);
>> +
>> +} else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
>> +kvmppc_realmode_hmi_handler();
>> +
>> +} else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
>> +vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
>> +
>> +} else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
>> +vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
>> +vcpu->arch.fault_dsisr = 

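The trap classification quoted above follows a simple rule: the trap
number comes from the scratch0 value stashed by the exit assembly (with
the HSRR 0x2 bit cleared), and only the NMI-class vectors get dedicated
save areas. Restated as a sketch (select_exsave() is illustrative, not
from the patch):

	static u64 *select_exsave(int trap)
	{
		if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET)	/* 0x100 */
			return local_paca->exnmi;
		if (trap == BOOK3S_INTERRUPT_MACHINE_CHECK)	/* 0x200 */
			return local_paca->exmc;
		return local_paca->exgen;	/* all higher vectors */
	}
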
Re: [PATCH v4 29/46] KVM: PPC: Book3S HV P9: Implement the rest of the P9 path in C

2021-03-31 Thread Alexey Kardashevskiy

On 3/23/21 12:02 PM, Nicholas Piggin wrote:

Almost all logic is moved to C, by introducing a new in_guest mode that
selects and branches very early in the interrupt handler to the P9 exit
code.

The remaining assembly is only about 160 lines of low level stack setup,
with VCPU vs host register save and restore, plus a small shim to the
legacy paths in the interrupt handler.
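
For orientation, the resulting entry/exit flow reads roughly as follows
(a simplified sketch assembled from the hunks below, with timing and
error handling omitted; it is not the literal implementation):

	static int p9_guest_run_once(struct kvm_vcpu *vcpu)
	{
		/* Tell the interrupt entry code to take the P9 exit path */
		local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST_HV_FAST;

		/* asm: save host regs, load guest regs, hrfid into guest */
		kvmppc_p9_enter_guest(vcpu);

		/* An interrupt in the guest branched to the P9 exit asm,
		 * which recovered the host stack and returned here */
		local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;

		/* The exit asm stashed the trap number; clear the HSRR bit */
		return local_paca->kvm_hstate.scratch0 & ~0x2;
	}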

There are two motivations for this. The first is simply to make the code
more maintainable by having it in C. The second is to reduce the amount of code
running in a special KVM mode, "realmode". I put that in quotes because
with radix it is no longer necessarily real-mode in the MMU, but it
still has to be treated specially because it may be in real-mode, and
has various important registers like PID, DEC, TB, etc. set to guest values.
This is hostile to the rest of Linux and can't use arbitrary kernel
functionality or be instrumented well.

This initial patch is a reasonably faithful conversion of the asm code.
It does lack any loop to return quickly back into the guest without
switching out of realmode in the case of unimportant or easily handled
interrupts; as explained in the previous change, handling HV interrupts
in real mode is not so important for P9.

Signed-off-by: Nicholas Piggin 
---
  arch/powerpc/include/asm/asm-prototypes.h |   3 +-
  arch/powerpc/include/asm/kvm_asm.h|   3 +-
  arch/powerpc/include/asm/kvm_book3s_64.h  |   8 +
  arch/powerpc/kernel/security.c|   5 +-
  arch/powerpc/kvm/Makefile |   3 +
  arch/powerpc/kvm/book3s_64_entry.S| 246 ++
  arch/powerpc/kvm/book3s_hv.c  |   9 +-
  arch/powerpc/kvm/book3s_hv_interrupt.c| 223 
  arch/powerpc/kvm/book3s_hv_rmhandlers.S   | 123 +--
  9 files changed, 500 insertions(+), 123 deletions(-)
  create mode 100644 arch/powerpc/kvm/book3s_hv_interrupt.c

diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 939f3c94c8f3..7c74c80ed994 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -122,6 +122,7 @@ extern s32 patch__call_flush_branch_caches3;
  extern s32 patch__flush_count_cache_return;
  extern s32 patch__flush_link_stack_return;
  extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
  extern s32 patch__memset_nocache, patch__memcpy_nocache;
  
  extern long flush_branch_caches;

@@ -142,7 +143,7 @@ void kvmhv_load_host_pmu(void);
  void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
  void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
  
-int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
  
  long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
  long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index a3633560493b..b4f9996bd331 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -146,7 +146,8 @@
  #define KVM_GUEST_MODE_GUEST  1
  #define KVM_GUEST_MODE_SKIP   2
  #define KVM_GUEST_MODE_GUEST_HV   3
-#define KVM_GUEST_MODE_HOST_HV 4
+#define KVM_GUEST_MODE_GUEST_HV_FAST   4 /* ISA v3.0 with host radix mode */
+#define KVM_GUEST_MODE_HOST_HV 5
  
  #define KVM_INST_FETCH_FAILED	-1
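
The only consumer of the new mode at this point is the early interrupt
dispatch, which is assembly in book3s_64_entry.S; rendered in C purely
for illustration, the test amounts to:

	/* Illustrative C rendering of the early asm check */
	static bool take_p9_exit_path(void)
	{
		return local_paca->kvm_hstate.in_guest ==
			KVM_GUEST_MODE_GUEST_HV_FAST;
	}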
  
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 9bb9bb370b53..c214bcffb441 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -153,9 +153,17 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
return radix;
  }
  
+int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+
  #define KVM_DEFAULT_HPT_ORDER 24  /* 16MB HPT by default */
  #endif
  
+/*
+ * Invalid HDSISR value which is used to indicate when HW has not set the reg.
+ * Used to work around an erratum.
+ */
+#define HDSISR_CANARY  0x7fff
+
  /*
   * We use a lock bit in HPTE dword 0 to synchronize updates and
   * accesses to each HPTE, and another bit to indicate non-present
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index e4e1a94ccf6a..3a607c11f20f 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -430,16 +430,19 @@ device_initcall(stf_barrier_debugfs_init);
  
  static void update_branch_cache_flush(void)
  {
-   u32 *site;
+   u32 *site, __maybe_unused *site2;
  
  #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
site = &patch__call_kvm_flush_link_stack;
+   site2 = &patch__call_kvm_flush_link_stack_p9;
// This controls the branch from guest_exit_cont to kvm_flush_link_stack
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+   patch_instruction_site(site2,