Re: [PATCH v4 39/40] KVM: arm/arm64: Move VGIC APR save/restore to vgic put/load

2018-02-22 Thread Marc Zyngier
On 15/02/18 21:03, Christoffer Dall wrote:
> The APRs can only have bits set when the guest acknowledges an interrupt
> in the LR and can only have a bit cleared when the guest EOIs an
> interrupt in the LR.  Therefore, if we have no LRs with any
> pending/active interrupts, the APRs cannot change value and there is no
> need to clear them on every exit from the VM (hint: they will have already
> been cleared when we exited the guest the last time with the LRs all
> EOIed).
> 
> The only cases we need to take care of are when we migrate the VCPU away
> from a CPU or migrate a new VCPU onto a CPU, or when we return to
> userspace to capture the state of the VCPU for migration.  To make sure
> this works, factor out the APR save/restore functionality into separate
> functions called from the VCPU (and by extension VGIC) put/load hooks.
> 
> Signed-off-by: Christoffer Dall 
> ---
>  arch/arm/include/asm/kvm_hyp.h   |   2 +
>  arch/arm64/include/asm/kvm_hyp.h |   2 +
>  virt/kvm/arm/hyp/vgic-v3-sr.c    | 124 +--
>  virt/kvm/arm/vgic/vgic-v2.c      |   7 +--
>  virt/kvm/arm/vgic/vgic-v3.c      |   5 ++
>  5 files changed, 78 insertions(+), 62 deletions(-)
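
For anyone reading along without the full series: the 5-line change to
vgic-v3.c is not quoted below, but is presumably just the put/load hooks
gaining calls into the new hyp functions. A minimal sketch of that wiring,
assuming the existing vgic_v3_load()/vgic_v3_put() hooks and the usual
kvm_call_hyp() convention -- a sketch, not the actual hunk:

	void vgic_v3_load(struct kvm_vcpu *vcpu)
	{
		struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

		/* restore VMCR first, then the active-priority state */
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
		kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
	}

	void vgic_v3_put(struct kvm_vcpu *vcpu)
	{
		struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
		/* the APRs only need to leave the CPU when the VCPU does */
		kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
	}
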
> 
> diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
> index 1ab8329e9ff7..530a3c1cfe6f 100644
> --- a/arch/arm/include/asm/kvm_hyp.h
> +++ b/arch/arm/include/asm/kvm_hyp.h
> @@ -110,6 +110,8 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
>  
>  void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
>  void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
> +void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
> +void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
>  
>  asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
>  asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> index febe417b8b4e..6f3929b2fcf7 100644
> --- a/arch/arm64/include/asm/kvm_hyp.h
> +++ b/arch/arm64/include/asm/kvm_hyp.h
> @@ -124,6 +124,8 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
>  
>  void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
>  void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
> +void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
> +void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
>  int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
>  
>  void __timer_enable_traps(struct kvm_vcpu *vcpu);
> diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
> index 9abf2f3c12b5..437d7af08683 100644
> --- a/virt/kvm/arm/hyp/vgic-v3-sr.c
> +++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
> @@ -21,6 +21,7 @@
>  
>  #include <asm/kvm_emulate.h>
>  #include <asm/kvm_hyp.h>
> +#include <asm/kvm_mmu.h>
>  
>  #define vtr_to_max_lr_idx(v) ((v) & 0xf)
> #define vtr_to_nr_pre_bits(v)	((((u32)(v) >> 26) & 7) + 1)
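
As background for the switch statements this patch moves around:
vtr_to_nr_pre_bits() extracts ICH_VTR_EL2.PREbits (bits [28:26], the number
of preemption bits minus one), which determines how many ICH_AP{0,1}R<n>_EL2
registers the CPU implements and therefore how many vgic_ap{0,1}r[] entries
need saving. An illustrative helper, not code from the patch:

	/* illustrative: how many AP0Rn/AP1Rn registers a given ICH_VTR_EL2 implies */
	static inline int nr_apr_regs(u64 vtr)
	{
		u32 nr_pre_bits = (((u32)vtr >> 26) & 7) + 1;

		switch (nr_pre_bits) {
		case 7:			/* 7 preemption bits: AP<n>R0..AP<n>R3 */
			return 4;
		case 6:			/* 6 preemption bits: AP<n>R0..AP<n>R1 */
			return 2;
		default:		/* 5 preemption bits: AP<n>R0 only */
			return 1;
		}
	}
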
> @@ -221,14 +222,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
>  
>   if (used_lrs) {
>   int i;
> - u32 nr_pre_bits;
>   u32 elrsr;
>  
>   elrsr = read_gicreg(ICH_ELSR_EL2);
>  
>   write_gicreg(0, ICH_HCR_EL2);
> - val = read_gicreg(ICH_VTR_EL2);
> - nr_pre_bits = vtr_to_nr_pre_bits(val);
>  
>   for (i = 0; i < used_lrs; i++) {
>   if (elrsr & (1 << i))
> @@ -238,39 +236,10 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
>  
>   __gic_v3_set_lr(0, i);
>   }
> -
> - switch (nr_pre_bits) {
> - case 7:
> - cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
> - cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
> - case 6:
> - cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
> - default:
> - cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
> - }
> -
> - switch (nr_pre_bits) {
> - case 7:
> - cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
> - cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
> - case 6:
> - cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
> - default:
> - cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
> - }
>   } else {
>   if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
>   cpu_if->its_vpe.its_vm)
>   write_gicreg(0, ICH_HCR_EL2);
> -
> - cpu_if->vgic_ap0r[0] = 0;
> - cpu_if->vgic_ap0r[1] = 0;
> - cpu_if->vgic_ap0r[2] = 0;
> - cpu_if->vgic_ap0r[3] = 0;
> - cpu_if->vgic_ap1r[0] = 0;
> - cpu_if->vgic_ap1r[1] = 0;
> - cpu_if->vgic_ap1r[2] = 0;
> - cpu_if->vgic_ap1r[3] = 0;
>   }
>  
>   val = read_gicreg(ICC_SRE_EL2);
> @@ -287,8 +256,6 @@ void
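
The rest of the diff is trimmed above, but the two switch blocks deleted from
__vgic_v3_save_state() presumably reappear more or less verbatim in the new
__vgic_v3_save_aprs(), something along these lines (a reconstruction, not the
actual hunk), with __vgic_v3_restore_aprs() as its mirror image using the
__vgic_v3_write_ap{0,1}rn() accessors:

	void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
	{
		struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
		u64 val = read_gicreg(ICH_VTR_EL2);
		u32 nr_pre_bits = vtr_to_nr_pre_bits(val);

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
			/* fall through */
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
			/* fall through */
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		/* ... and the same cascade again for vgic_ap1r[] via __vgic_v3_read_ap1rn() */
	}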