On Friday 06 Aug 2021 at 12:31:08 (+0100), Will Deacon wrote:
> From: Marc Zyngier <[email protected]>
> 
> Since TLB invalidation can run in parallel with VMID allocation,
> we need to be careful and avoid any sort of load/store tearing.
> Use {READ,WRITE}_ONCE consistently to avoid any surprises.
> 
> Cc: Catalin Marinas <[email protected]>
> Cc: Jade Alglave <[email protected]>
> Cc: Shameer Kolothum <[email protected]>
> Signed-off-by: Marc Zyngier <[email protected]>
> Signed-off-by: Will Deacon <[email protected]>
> ---
>  arch/arm64/include/asm/kvm_mmu.h      | 7 ++++++-
>  arch/arm64/kvm/arm.c                  | 2 +-
>  arch/arm64/kvm/hyp/nvhe/mem_protect.c | 4 ++--
>  arch/arm64/kvm/mmu.c                  | 2 +-
>  4 files changed, 10 insertions(+), 5 deletions(-)
> 
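
For anyone reading along, the tearing in question is the compiler's
freedom to split or merge plain accesses. A minimal sketch of the idea
(the type and function names here are illustrative, not the kernel's
actual structures):

	#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
	#include <linux/types.h>	/* u64 */

	struct demo_vmid {
		u64 vmid;
	};

	/*
	 * Writer side (VMID allocation): a plain "v->vmid = val" may
	 * legally be emitted as two narrower stores, so a concurrent
	 * reader could observe a half-updated value. WRITE_ONCE()
	 * forces a single store.
	 */
	static void demo_update(struct demo_vmid *v, u64 val)
	{
		WRITE_ONCE(v->vmid, val);
	}

	/*
	 * Reader side (TLB invalidation): READ_ONCE() likewise forces
	 * a single load and stops the compiler from re-reading the
	 * field under our feet.
	 */
	static u64 demo_read(struct demo_vmid *v)
	{
		return READ_ONCE(v->vmid);
	}
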
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 934ef0deff9f..5828dd8fa738 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -252,6 +252,11 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
>  
>  #define kvm_phys_to_vttbr(addr)              phys_to_ttbr(addr)
>  
> +/*
> + * When this is (directly or indirectly) used on the TLB invalidation
> + * path, we rely on a previously issued DSB so that page table updates
> + * and VMID reads are correctly ordered.
> + */
>  static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
>  {
>       struct kvm_vmid *vmid = &mmu->vmid;
> @@ -259,7 +264,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
>       u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
>  
>       baddr = mmu->pgd_phys;
> -     vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
> +     vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
>       return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
>  }
>  
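
The comment above deserves a note: kvm_get_vttbr() itself carries no
barrier, so the ordering between page-table updates and the VMID read
comes from the invalidation path. Roughly (a sketch of the pattern, not
the actual call sites -- the real flow goes through the
__kvm_tlb_flush_*() helpers):

	/*
	 * Sketch: prior stage-2 PTE updates are published by a DSB
	 * before VTTBR_EL2 (and hence the VMID) is sampled, which is
	 * why kvm_get_vttbr() needs no barrier of its own.
	 */
	dsb(ishst);				/* publish PTE updates */
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
	isb();
	/* ... TLBI instructions follow ... */
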
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index e9a2b8f27792..658f76067f46 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -571,7 +571,7 @@ static void update_vmid(struct kvm_vmid *vmid)
>               kvm_call_hyp(__kvm_flush_vm_context);
>       }
>  
> -     vmid->vmid = kvm_next_vmid;
> +     WRITE_ONCE(vmid->vmid, kvm_next_vmid);
>       kvm_next_vmid++;
>       kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
>  
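
To make the race this pairs with concrete, an illustrative interleaving
(not taken from the patch):

	CPU0 (update_vmid)		CPU1 (TLB invalidation)
	------------------		-----------------------
	vmid->vmid = kvm_next_vmid;
	  [plain store torn in two]
	  low half lands		kvm_get_vttbr() loads vmid->vmid
					  -> half-old, half-new value
	  high half lands		TLBI runs tagged with a VMID
					that was never allocated

With WRITE_ONCE() on the allocation side and READ_ONCE() in
kvm_get_vttbr(), both accesses are single and aligned, so the reader
observes either the old or the new VMID, never a mixture.
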
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index d4e74ca7f876..55ae97a144b8 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -109,8 +109,8 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
>       mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
>       mmu->arch = &host_kvm.arch;
>       mmu->pgt = &host_kvm.pgt;
> -     mmu->vmid.vmid_gen = 0;
> -     mmu->vmid.vmid = 0;
> +     WRITE_ONCE(mmu->vmid.vmid_gen, 0);
> +     WRITE_ONCE(mmu->vmid.vmid, 0);

I'm guessing it should be safe to omit those, since nothing should be
able to access the mmu concurrently this early in init? But they
certainly do no harm and can serve as documentation anyway, so:

Reviewed-by: Quentin Perret <[email protected]>

Thanks,
Quentin

>  
>       return 0;
>  }
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 3155c9e778f0..b1a6eaec28ff 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -485,7 +485,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
>       mmu->arch = &kvm->arch;
>       mmu->pgt = pgt;
>       mmu->pgd_phys = __pa(pgt->pgd);
> -     mmu->vmid.vmid_gen = 0;
> +     WRITE_ONCE(mmu->vmid.vmid_gen, 0);
>       return 0;
>  
>  out_destroy_pgtable:
> -- 
> 2.32.0.605.g8dce9f2422-goog
> 