From: Atish Patra <atish.pa...@wdc.com>

The KVM host kernel running in HS-mode needs to handle SBI calls coming
from guest kernel running in VS-mode.

This patch adds SBI v0.1 support in KVM RISC-V. All the SBI calls are
implemented correctly except remote TLB flushes. For remote TLB flushes,
we are currently doing a full TLB flush; this will be optimized in the future.

Signed-off-by: Atish Patra <atish.pa...@wdc.com>
Signed-off-by: Anup Patel <anup.pa...@wdc.com>
Acked-by: Paolo Bonzini <pbonz...@redhat.com>
Reviewed-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/riscv/include/asm/kvm_host.h |   2 +
 arch/riscv/kvm/Makefile           |   2 +-
 arch/riscv/kvm/vcpu_exit.c        |   4 ++
 arch/riscv/kvm/vcpu_sbi.c         | 107 ++++++++++++++++++++++++++++++
 4 files changed, 114 insertions(+), 1 deletion(-)
 create mode 100644 arch/riscv/kvm/vcpu_sbi.c

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 8c7f947b31b6..58cb6789f502 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -250,4 +250,6 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
 #endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 3e0c7558320d..b56dc1650d2c 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
 kvm-objs := $(common-objs-y)
 
 kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
-kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
+kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o vcpu_sbi.o
 
 obj-$(CONFIG_KVM)      += kvm.o
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 7507b859246b..0e9b0ffa169d 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -587,6 +587,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                    (vcpu->arch.guest_context.hstatus & HSTATUS_STL))
                        ret = stage2_page_fault(vcpu, run, scause, stval);
                break;
+       case EXC_SUPERVISOR_SYSCALL:
+               if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+                       ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
+               break;
        default:
                break;
        };
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
new file mode 100644
index 000000000000..3d0c33c94daf
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.pa...@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+
+#define SBI_VERSION_MAJOR                      0
+#define SBI_VERSION_MINOR                      1
+
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
+                                   struct kvm_run *run, u32 type)
+{
+       int i;
+       struct kvm_vcpu *tmp;
+
+       kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+               tmp->arch.power_off = true;
+       kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+       memset(&run->system_event, 0, sizeof(run->system_event));
+       run->system_event.type = type;
+       run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int i, ret = 1;
+       u64 next_cycle;
+       struct kvm_vcpu *rvcpu;
+       bool next_sepc = true;
+       ulong hmask, ut_scause = 0;
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
+       if (!cp)
+               return -EINVAL;
+
+       switch (cp->a7) {
+       case SBI_EXT_0_1_SET_TIMER:
+#if __riscv_xlen == 32
+               next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+               next_cycle = (u64)cp->a0;
+#endif
+               kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+               break;
+       case SBI_EXT_0_1_CLEAR_IPI:
+               kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_SOFT);
+               break;
+       case SBI_EXT_0_1_SEND_IPI:
+               hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+                                                  &ut_scause);
+               if (ut_scause) {
+                       kvm_riscv_vcpu_trap_redirect(vcpu, ut_scause,
+                                                    cp->a0);
+                       next_sepc = false;
+               } else {
+                       for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+                               rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+                               kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_S_SOFT);
+                       }
+               }
+               break;
+       case SBI_EXT_0_1_SHUTDOWN:
+               kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
+               next_sepc = false;
+               ret = 0;
+               break;
+       case SBI_EXT_0_1_REMOTE_FENCE_I:
+               sbi_remote_fence_i(NULL);
+               break;
+       /*
+        * TODO: There should be a way to call remote hfence.bvma.
+        * Preferred method is now a SBI call. Until then, just flush
+        * all tlbs.
+        */
+       case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
+               /* TODO: Parse vma range. */
+               sbi_remote_sfence_vma(NULL, 0, 0);
+               break;
+       case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
+               /* TODO: Parse vma range for given ASID */
+               sbi_remote_sfence_vma(NULL, 0, 0);
+               break;
+       default:
+               /*
+                * For now, just return error to Guest.
+                * TODO: In-future, we will route unsupported SBI calls
+                * to user-space.
+                */
+               cp->a0 = -ENOTSUPP;
+               break;
+       }
+
+       if (next_sepc)
+               cp->sepc += 4;
+
+       return ret;
+}
-- 
2.17.1

Reply via email to