On 16/10/19 18:12, Anup Patel wrote:
> Currently, we track the last value written to the VSIP CSR using a
> per-CPU vsip_shadow variable, but this easily goes out-of-sync
> because the Guest can update the VSIP.SSIP bit directly.
> 
> To simplify things, we remove the per-CPU vsip_shadow variable and
> unconditionally write vcpu->arch.guest_csr.vsip to the VSIP CSR in
> the run-loop.
> 
> Signed-off-by: Anup Patel <[email protected]>

Please squash this and patch 20 into the corresponding patches earlier
in the series.
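
For anyone reading the archive, the hazard being removed is worth
spelling out: the shadow only tracks what KVM last wrote, while the
guest's own writes to its sip CSR land in VSIP directly, so the
conditional write can be wrongly skipped. A minimal sketch of the
stale case (illustrative only, not the actual kernel code; IRQ_S_SOFT
is the SSIP bit position):

	/* KVM wants SSIP set; the shadow already says it is set... */
	csr->vsip |= (1UL << IRQ_S_SOFT);
	if (*vsip_shadow != csr->vsip)	/* false: values match */
		csr_write(CSR_VSIP, csr->vsip);
	/*
	 * ...but the guest meanwhile cleared SSIP in hardware via its
	 * sip CSR, so the real VSIP no longer matches *vsip_shadow and
	 * the needed write is skipped. An unconditional csr_write()
	 * cannot go stale.
	 */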

Paolo

> ---
>  arch/riscv/include/asm/kvm_host.h |  3 ---
>  arch/riscv/kvm/main.c             |  6 ------
>  arch/riscv/kvm/vcpu.c             | 24 +-----------------------
>  3 files changed, 1 insertion(+), 32 deletions(-)
> 
> diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
> index ec1ca4bc98f2..cd86acaed055 100644
> --- a/arch/riscv/include/asm/kvm_host.h
> +++ b/arch/riscv/include/asm/kvm_host.h
> @@ -202,9 +202,6 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
>  static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
>  static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
>  
> -int kvm_riscv_setup_vsip(void);
> -void kvm_riscv_cleanup_vsip(void);
> -
>  #define KVM_ARCH_WANT_MMU_NOTIFIER
>  int kvm_unmap_hva_range(struct kvm *kvm,
>                       unsigned long start, unsigned long end);
> diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
> index 55df85184241..002301a27d29 100644
> --- a/arch/riscv/kvm/main.c
> +++ b/arch/riscv/kvm/main.c
> @@ -61,17 +61,11 @@ void kvm_arch_hardware_disable(void)
>  
>  int kvm_arch_init(void *opaque)
>  {
> -     int ret;
> -
>       if (!riscv_isa_extension_available(NULL, h)) {
>               kvm_info("hypervisor extension not available\n");
>               return -ENODEV;
>       }
>  
> -     ret = kvm_riscv_setup_vsip();
> -     if (ret)
> -             return ret;
> -
>       kvm_riscv_stage2_vmid_detect();
>  
>       kvm_info("hypervisor extension available\n");
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index fd77cd39dd8c..f1a218d3a8cf 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -111,8 +111,6 @@ static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx) {}
>                                riscv_isa_extension_mask(s) | \
>                                riscv_isa_extension_mask(u))
>  
> -static unsigned long __percpu *vsip_shadow;
> -
>  static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
>  {
>       struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
> @@ -765,7 +763,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
>  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
>       struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
> -     unsigned long *vsip = raw_cpu_ptr(vsip_shadow);
>  
>       csr_write(CSR_VSSTATUS, csr->vsstatus);
>       csr_write(CSR_VSIE, csr->vsie);
> @@ -775,7 +772,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>       csr_write(CSR_VSCAUSE, csr->vscause);
>       csr_write(CSR_VSTVAL, csr->vstval);
>       csr_write(CSR_VSIP, csr->vsip);
> -     *vsip = csr->vsip;
>       csr_write(CSR_VSATP, csr->vsatp);
>  
>       kvm_riscv_stage2_update_hgatp(vcpu);
> @@ -843,26 +839,8 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
>  static void kvm_riscv_update_vsip(struct kvm_vcpu *vcpu)
>  {
>       struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
> -     unsigned long *vsip = raw_cpu_ptr(vsip_shadow);
> -
> -     if (*vsip != csr->vsip) {
> -             csr_write(CSR_VSIP, csr->vsip);
> -             *vsip = csr->vsip;
> -     }
> -}
> -
> -int kvm_riscv_setup_vsip(void)
> -{
> -     vsip_shadow = alloc_percpu(unsigned long);
> -     if (!vsip_shadow)
> -             return -ENOMEM;
>  
> -     return 0;
> -}
> -
> -void kvm_riscv_cleanup_vsip(void)
> -{
> -     free_percpu(vsip_shadow);
> +     csr_write(CSR_VSIP, csr->vsip);
>  }
>  
>  int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
> 
