Re: [PATCH v3 19/52] KVM: PPC: Book3S HV P9: Reduce mtmsrd instructions required to save host SPRs

2021-10-19 Thread Nicholas Piggin
Excerpts from Fabiano Rosas's message of October 16, 2021 11:45 pm:
> Nicholas Piggin  writes:
> 
>> This reduces the number of mtmsrd required to enable facility bits when
>> saving/restoring registers, by having the KVM code set all bits up front
>> rather than using individual facility functions that set their particular
>> MSR bits.
>>
>> Signed-off-by: Nicholas Piggin 
> 
> Reviewed-by: Fabiano Rosas 
> 
> Aside: at msr_check_and_set what's with MSR_VSX always being implicitly
> set whenever MSR_FP is set? I get that it depends on MSR_FP, but if FP
> always implies VSX, then you could stop setting MSR_VSX in this patch.

Good question. This seems to come from quite old code and has been
carried forward. I did not immediately see why; it might have been to
avoid another mtmsrd operation if we later want to set VSX.

But the rule seems to be to set MSR_VSX if both FP and VEC are set, so
this seems a bit odd. __msr_check_and_clear similarly clears VSX if we 
clear FP, but not if we clear VEC.
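
The logic in question looks roughly like this (paraphrasing
arch/powerpc/kernel/process.c from memory, so read it as a sketch
rather than the exact source):

unsigned long msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr = oldmsr | bits;

	/* FP implicitly drags VSX along when the CPU has VSX */
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}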

It might be good to remove that logic, or turn it into warnings and
make sure the callers do the right thing. Not sure.
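
Something like this is what I'd have in mind for the warning variant
(untested sketch of a check inside msr_check_and_set, not a patch):

	/* Catch callers that don't follow the FP/VEC/VSX convention */
	if (cpu_has_feature(CPU_FTR_VSX))
		WARN_ON_ONCE((bits & MSR_VSX) &&
			     (bits & (MSR_FP | MSR_VEC)) != (MSR_FP | MSR_VEC));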

Thanks,
Nick


Re: [PATCH v3 19/52] KVM: PPC: Book3S HV P9: Reduce mtmsrd instructions required to save host SPRs

2021-10-16 Thread Fabiano Rosas
Nicholas Piggin  writes:

> This reduces the number of mtmsrd required to enable facility bits when
> saving/restoring registers, by having the KVM code set all bits up front
> rather than using individual facility functions that set their particular
> MSR bits.
>
> Signed-off-by: Nicholas Piggin 

Reviewed-by: Fabiano Rosas 

Aside: at msr_check_and_set what's with MSR_VSX always being implicitly
set whenever MSR_FP is set? I get that it depends on MSR_FP, but if FP
always implies VSX, then you could stop setting MSR_VSX in this patch.

> ---
>  arch/powerpc/include/asm/switch_to.h  |  2 +
>  arch/powerpc/kernel/process.c | 28 +
>  arch/powerpc/kvm/book3s_hv.c  | 59 ++-
>  arch/powerpc/kvm/book3s_hv_p9_entry.c |  1 +
>  4 files changed, 71 insertions(+), 19 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
> index 9d1fbd8be1c7..e8013cd6b646 100644
> --- a/arch/powerpc/include/asm/switch_to.h
> +++ b/arch/powerpc/include/asm/switch_to.h
> @@ -112,6 +112,8 @@ static inline void clear_task_ebb(struct task_struct *t)
>  #endif
>  }
>
> +void kvmppc_save_user_regs(void);
> +
>  extern int set_thread_tidr(struct task_struct *t);
>
>  #endif /* _ASM_POWERPC_SWITCH_TO_H */
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index 50436b52c213..3fca321b820d 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -1156,6 +1156,34 @@ static inline void save_sprs(struct thread_struct *t)
>  #endif
>  }
>
> +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> +void kvmppc_save_user_regs(void)
> +{
> +	unsigned long usermsr;
> +
> +	if (!current->thread.regs)
> +		return;
> +
> +	usermsr = current->thread.regs->msr;
> +
> +	if (usermsr & MSR_FP)
> +		save_fpu(current);
> +
> +	if (usermsr & MSR_VEC)
> +		save_altivec(current);
> +
> +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> +	if (usermsr & MSR_TM) {
> +		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
> +		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
> +		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
> +		current->thread.regs->msr &= ~MSR_TM;
> +	}
> +#endif
> +}
> +EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
> +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
> +
>  static inline void restore_sprs(struct thread_struct *old_thread,
>  				struct thread_struct *new_thread)
>  {
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index fca89ed2244f..16365c0e9872 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -4140,6 +4140,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>  	struct p9_host_os_sprs host_os_sprs;
>  	s64 dec;
>  	u64 tb, next_timer;
> +	unsigned long msr;
>  	int trap;
> 
>  	WARN_ON_ONCE(vcpu->arch.ceded);
> @@ -4151,8 +4152,23 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>  	if (next_timer < time_limit)
>  		time_limit = next_timer;
> 
> +	vcpu->arch.ceded = 0;
> +
>  	save_p9_host_os_sprs(&host_os_sprs);
>
> +	/* MSR bits may have been cleared by context switch */
> +	msr = 0;
> +	if (IS_ENABLED(CONFIG_PPC_FPU))
> +		msr |= MSR_FP;
> +	if (cpu_has_feature(CPU_FTR_ALTIVEC))
> +		msr |= MSR_VEC;
> +	if (cpu_has_feature(CPU_FTR_VSX))
> +		msr |= MSR_VSX;
> +	if (cpu_has_feature(CPU_FTR_TM) ||
> +	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
> +		msr |= MSR_TM;
> +	msr = msr_check_and_set(msr);
> +
>  	kvmppc_subcore_enter_guest();
> 
>  	vc->entry_exit_map = 1;
> @@ -4161,12 +4177,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>  	vcpu_vpa_increment_dispatch(vcpu);
> 
>  	if (cpu_has_feature(CPU_FTR_TM) ||
> -	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
> +	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
>  		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
> +		msr = mfmsr(); /* TM restore can update msr */
> +	}
> 
>  	switch_pmu_to_guest(vcpu, &host_os_sprs);
> 
> -	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
>  	load_fp_state(&vcpu->arch.fp);
>  #ifdef CONFIG_ALTIVEC
>  	load_vr_state(&vcpu->arch.vr);
> @@ -4275,7 +4292,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
> 
>  	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
> 
> -	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
>  	store_fp_state(&vcpu->arch.fp);
>  #ifdef CONFIG_ALTIVEC
>  	store_vr_state(&vcpu->arch.vr);
> @@ -4825,19 +4841,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
>  	unsigned long user_tar = 0;
>  	unsigned int user_vrsave;
>  	struct kvm *kvm;
> +	unsigned long msr;
>
>   if (!vcpu->arc

[PATCH v3 19/52] KVM: PPC: Book3S HV P9: Reduce mtmsrd instructions required to save host SPRs

2021-10-04 Thread Nicholas Piggin
This reduces the number of mtmsrd required to enable facility bits when
saving/restoring registers, by having the KVM code set all bits up front
rather than using individual facility functions that set their particular
MSR bits.

Signed-off-by: Nicholas Piggin 
---
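For readers skimming the diff: the shape of the change is roughly the
following (a simplified sketch built from the calls touched below, not
the literal code):

	/* Before: each save/restore site does its own msr_check_and_set(),
	 * so the entry/exit path can execute several mtmsrd instructions. */
	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);	/* mtmsrd */
	load_fp_state(&vcpu->arch.fp);
	/* ... guest entry/exit ... */
	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);	/* another mtmsrd */
	store_fp_state(&vcpu->arch.fp);

	/* After: enable every facility the path will need up front. */
	unsigned long msr = 0;
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr |= MSR_VSX;
	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		msr |= MSR_TM;
	msr = msr_check_and_set(msr);	/* at most one mtmsrd */
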
 arch/powerpc/include/asm/switch_to.h  |  2 +
 arch/powerpc/kernel/process.c | 28 +
 arch/powerpc/kvm/book3s_hv.c  | 59 ++-
 arch/powerpc/kvm/book3s_hv_p9_entry.c |  1 +
 4 files changed, 71 insertions(+), 19 deletions(-)

diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9d1fbd8be1c7..e8013cd6b646 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -112,6 +112,8 @@ static inline void clear_task_ebb(struct task_struct *t)
 #endif
 }
 
+void kvmppc_save_user_regs(void);
+
 extern int set_thread_tidr(struct task_struct *t);
 
 #endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 50436b52c213..3fca321b820d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1156,6 +1156,34 @@ static inline void save_sprs(struct thread_struct *t)
 #endif
 }
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvmppc_save_user_regs(void)
+{
+	unsigned long usermsr;
+
+	if (!current->thread.regs)
+		return;
+
+	usermsr = current->thread.regs->msr;
+
+	if (usermsr & MSR_FP)
+		save_fpu(current);
+
+	if (usermsr & MSR_VEC)
+		save_altivec(current);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (usermsr & MSR_TM) {
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+}
+EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
 static inline void restore_sprs(struct thread_struct *old_thread,
struct thread_struct *new_thread)
 {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index fca89ed2244f..16365c0e9872 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4140,6 +4140,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
struct p9_host_os_sprs host_os_sprs;
s64 dec;
u64 tb, next_timer;
+   unsigned long msr;
int trap;
 
WARN_ON_ONCE(vcpu->arch.ceded);
@@ -4151,8 +4152,23 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
	if (next_timer < time_limit)
		time_limit = next_timer;

+	vcpu->arch.ceded = 0;
+
	save_p9_host_os_sprs(&host_os_sprs);
 
+	/* MSR bits may have been cleared by context switch */
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+
kvmppc_subcore_enter_guest();
 
vc->entry_exit_map = 1;
@@ -4161,12 +4177,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
 
	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+		msr = mfmsr(); /* TM restore can update msr */
+	}
 
switch_pmu_to_guest(vcpu, &host_os_sprs);
 
-   msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
load_vr_state(&vcpu->arch.vr);
@@ -4275,7 +4292,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-   msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
store_vr_state(&vcpu->arch.vr);
@@ -4825,19 +4841,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
unsigned long user_tar = 0;
unsigned int user_vrsave;
struct kvm *kvm;
+   unsigned long msr;
 
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
 
+	/* No need to go into the guest when all we'll do is come back out */
+	if (signal_pending(current)) {
+		run->exit_reason = KVM_EXIT_INTR;
+		return -EINTR;
+	}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Don't allow entr