From: Marc Zyngier <[email protected]>

The protected mode relies on a separate helper to load the
S2 context. Move over to the __load_guest_stage2() helper
instead.

Cc: Catalin Marinas <[email protected]>
Cc: Jade Alglave <[email protected]>
Cc: Shameer Kolothum <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
---
 arch/arm64/include/asm/kvm_mmu.h              | 11 +++--------
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  2 +-
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         |  2 +-
 3 files changed, 5 insertions(+), 10 deletions(-)
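
For reviewers: a minimal sketch (not part of the patch itself) of what the consolidated helper looks like once the two kvm_mmu.h hunks below are applied, with the VTCR taken from the kvm_arch instead of being passed as a separate argument:

static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
                                                struct kvm_arch *arch)
{
        /* vtcr now comes from the kvm_arch rather than a separate parameter */
        write_sysreg(arch->vtcr, vtcr_el2);
        write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

        /* (existing speculative-AT workaround comment elided) */
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

The two host call sites converted below then pass &host_kvm.arch instead of the raw host_kvm.arch.vtcr.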

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 05e089653a1a..934ef0deff9f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -267,9 +267,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
+                                               struct kvm_arch *arch)
 {
-       write_sysreg(vtcr, vtcr_el2);
+       write_sysreg(arch->vtcr, vtcr_el2);
        write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
        /*
@@ -280,12 +281,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
-                                               struct kvm_arch *arch)
-{
-       __load_stage2(mmu, arch->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
        return container_of(mmu->arch, struct kvm, arch);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 9c227d87c36d..a910648bc71b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -29,7 +29,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 static __always_inline void __load_host_stage2(void)
 {
        if (static_branch_likely(&kvm_protected_mode_initialized))
-               __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+               __load_guest_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
        else
                write_sysreg(0, vttbr_el2);
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce95d3bd..d4e74ca7f876 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -126,7 +126,7 @@ int __pkvm_prot_finalize(void)
        kvm_flush_dcache_to_poc(params, sizeof(*params));
 
        write_sysreg(params->hcr_el2, hcr_el2);
-       __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+       __load_guest_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 
        /*
         * Make sure to have an ISB before the TLB maintenance below but only
-- 
2.32.0.605.g8dce9f2422-goog
