The Guest HS-mode (aka L1 or guest hypervisor) needs HFENCE instructions for TLB maintenance of nested guest physical addresses, so add the corresponding HFENCE instruction emulation.
Signed-off-by: Anup Patel <[email protected]>
---
 arch/riscv/include/asm/insn.h            |  6 ++
 arch/riscv/include/asm/kvm_vcpu_nested.h |  4 ++
 arch/riscv/kvm/vcpu_insn.c               | 10 +++
 arch/riscv/kvm/vcpu_nested_insn.c        | 86 ++++++++++++++++++++++++
 4 files changed, 106 insertions(+)

diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
index 24a8abb3283c..6896ba0581b5 100644
--- a/arch/riscv/include/asm/insn.h
+++ b/arch/riscv/include/asm/insn.h
@@ -340,6 +340,12 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code)
 #define INSN_MASK_WRS 0xffffffff
 #define INSN_MATCH_WRS 0x00d00073
 
+#define INSN_MASK_HFENCE_VVMA 0xfe007fff
+#define INSN_MATCH_HFENCE_VVMA 0x22000073
+
+#define INSN_MASK_HFENCE_GVMA 0xfe007fff
+#define INSN_MATCH_HFENCE_GVMA 0x62000073
+
 #define INSN_MATCH_CSRRW 0x1073
 #define INSN_MASK_CSRRW 0x707f
 #define INSN_MATCH_CSRRS 0x2073
diff --git a/arch/riscv/include/asm/kvm_vcpu_nested.h b/arch/riscv/include/asm/kvm_vcpu_nested.h
index 5262ec4f37b7..db6d89cf9771 100644
--- a/arch/riscv/include/asm/kvm_vcpu_nested.h
+++ b/arch/riscv/include/asm/kvm_vcpu_nested.h
@@ -64,6 +64,10 @@ struct kvm_vcpu_nested {
 #define kvm_riscv_vcpu_nested_virt(__vcpu) ((__vcpu)->arch.nested.virt)
 
 int kvm_riscv_vcpu_nested_insn_sret(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
+int kvm_riscv_vcpu_nested_insn_hfence_vvma(struct kvm_vcpu *vcpu, struct kvm_run *run,
+					   ulong insn);
+int kvm_riscv_vcpu_nested_insn_hfence_gvma(struct kvm_vcpu *vcpu, struct kvm_run *run,
+					   ulong insn);
 int kvm_riscv_vcpu_nested_smode_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
 					unsigned long *val, unsigned long new_val,
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index 0246ca2d5e93..8f11cda133ac 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -303,6 +303,16 @@ static const struct insn_func system_opcode_funcs[] = {
 		.match = INSN_MATCH_WRS,
 		.func = wrs_insn,
 	},
+	{
+		.mask = INSN_MASK_HFENCE_VVMA,
+		.match = INSN_MATCH_HFENCE_VVMA,
+		.func = kvm_riscv_vcpu_nested_insn_hfence_vvma,
+	},
+	{
+		.mask = INSN_MASK_HFENCE_GVMA,
+		.match = INSN_MATCH_HFENCE_GVMA,
+		.func = kvm_riscv_vcpu_nested_insn_hfence_gvma,
+	},
 };
 
 static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
diff --git a/arch/riscv/kvm/vcpu_nested_insn.c b/arch/riscv/kvm/vcpu_nested_insn.c
index 8f5b2992dbb9..7e57d3215930 100644
--- a/arch/riscv/kvm/vcpu_nested_insn.c
+++ b/arch/riscv/kvm/vcpu_nested_insn.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <asm/insn.h>
 #include <asm/kvm_nacl.h>
 #include <asm/kvm_vcpu_insn.h>
 
@@ -52,3 +53,88 @@ int kvm_riscv_vcpu_nested_insn_sret(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 	return KVM_INSN_CONTINUE_SAME_SEPC;
 }
+
+int kvm_riscv_vcpu_nested_insn_hfence_vvma(struct kvm_vcpu *vcpu, struct kvm_run *run,
+					   ulong insn)
+{
+	unsigned int vmid = (vcpu->arch.nested.csr.hgatp & HGATP_VMID) >> HGATP_VMID_SHIFT;
+	unsigned long vaddr = GET_RS1(insn, &vcpu->arch.guest_context);
+	unsigned int asid = GET_RS2(insn, &vcpu->arch.guest_context);
+	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
+	unsigned int rs2_num = (insn >> SH_RS2) & MASK_RX;
+
+	/*
+	 * If H-extension is not available for VCPU then forward trap
+	 * as illegal instruction trap to virtual-HS mode.
+	 */
+	if (!riscv_isa_extension_available(vcpu->arch.isa, h))
+		return KVM_INSN_ILLEGAL_TRAP;
+
+	/*
+	 * Trap from virtual-VS and virtual-VU modes should be forwarded
+	 * to virtual-HS mode as a virtual instruction trap.
+	 */
+	if (kvm_riscv_vcpu_nested_virt(vcpu))
+		return KVM_INSN_VIRTUAL_TRAP;
+
+	/*
+	 * H-extension instructions not allowed in virtual-U mode so
+	 * forward trap as illegal instruction trap to virtual-HS mode.
+	 */
+	if (!(vcpu->arch.guest_context.hstatus & HSTATUS_SPVP))
+		return KVM_INSN_ILLEGAL_TRAP;
+
+	if (!rs1_num && !rs2_num)
+		kvm_riscv_vcpu_nested_swtlb_vvma_flush(vcpu, 0, 0, 0, vmid);
+	else if (!rs1_num && rs2_num)
+		kvm_riscv_vcpu_nested_swtlb_vvma_flush_asid(vcpu, 0, 0, 0, vmid, asid);
+	else if (rs1_num && !rs2_num)
+		kvm_riscv_vcpu_nested_swtlb_vvma_flush(vcpu, vaddr, PAGE_SIZE, PAGE_SHIFT, vmid);
+	else
+		kvm_riscv_vcpu_nested_swtlb_vvma_flush_asid(vcpu, vaddr, PAGE_SIZE, PAGE_SHIFT,
+							    vmid, asid);
+
+	return KVM_INSN_CONTINUE_NEXT_SEPC;
+}
+
+int kvm_riscv_vcpu_nested_insn_hfence_gvma(struct kvm_vcpu *vcpu, struct kvm_run *run,
+					   ulong insn)
+{
+	unsigned int vmid = GET_RS2(insn, &vcpu->arch.guest_context);
+	gpa_t gaddr = GET_RS1(insn, &vcpu->arch.guest_context) << 2;
+	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
+	unsigned int rs2_num = (insn >> SH_RS2) & MASK_RX;
+
+	/*
+	 * If H-extension is not available for VCPU then forward trap
+	 * as illegal instruction trap to virtual-HS mode.
+	 */
+	if (!riscv_isa_extension_available(vcpu->arch.isa, h))
+		return KVM_INSN_ILLEGAL_TRAP;
+
+	/*
+	 * Trap from virtual-VS and virtual-VU modes should be forwarded
+	 * to virtual-HS mode as a virtual instruction trap.
+	 */
+	if (kvm_riscv_vcpu_nested_virt(vcpu))
+		return KVM_INSN_VIRTUAL_TRAP;
+
+	/*
+	 * H-extension instructions not allowed in virtual-U mode so
+	 * forward trap as illegal instruction trap to virtual-HS mode.
+	 */
+	if (!(vcpu->arch.guest_context.hstatus & HSTATUS_SPVP))
+		return KVM_INSN_ILLEGAL_TRAP;
+
+	if (!rs1_num && !rs2_num)
+		kvm_riscv_vcpu_nested_swtlb_gvma_flush(vcpu, 0, 0, 0);
+	else if (!rs1_num && rs2_num)
+		kvm_riscv_vcpu_nested_swtlb_gvma_flush_vmid(vcpu, 0, 0, 0, vmid);
+	else if (rs1_num && !rs2_num)
+		kvm_riscv_vcpu_nested_swtlb_gvma_flush(vcpu, gaddr, PAGE_SIZE, PAGE_SHIFT);
+	else
+		kvm_riscv_vcpu_nested_swtlb_gvma_flush_vmid(vcpu, gaddr, PAGE_SIZE, PAGE_SHIFT,
+							    vmid);
+
+	return KVM_INSN_CONTINUE_NEXT_SEPC;
+}
-- 
2.43.0

