Re: [PATCH v7 18/21] RISC-V: KVM: Add SBI v0.1 support

2019-09-23 Thread Anup Patel
On Mon, Sep 23, 2019 at 12:31 PM Alexander Graf  wrote:
>
>
>
> On 04.09.19 18:16, Anup Patel wrote:
> > From: Atish Patra 
> >
> > The KVM host kernel running in HS-mode needs to handle SBI calls coming
> > from the guest kernel running in VS-mode.
> >
> > This patch adds SBI v0.1 support in KVM RISC-V. All the SBI calls are
> > implemented; remote TLB flushes are currently approximated by a full
> > TLB flush, which will be optimized in the future.
> >
> > Signed-off-by: Atish Patra 
> > Signed-off-by: Anup Patel 
> > Acked-by: Paolo Bonzini 
> > Reviewed-by: Paolo Bonzini 
> > ---
> >   arch/riscv/include/asm/kvm_host.h |   2 +
> >   arch/riscv/kvm/Makefile   |   2 +-
> >   arch/riscv/kvm/vcpu_exit.c|   3 +
> >   arch/riscv/kvm/vcpu_sbi.c | 104 ++
> >   4 files changed, 110 insertions(+), 1 deletion(-)
> >   create mode 100644 arch/riscv/kvm/vcpu_sbi.c
> >
> > diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
> > index 928c67828b1b..269bfa5641b1 100644
> > --- a/arch/riscv/include/asm/kvm_host.h
> > +++ b/arch/riscv/include/asm/kvm_host.h
> > @@ -250,4 +250,6 @@ bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
> >   void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
> >   void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
> >
> > +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu);
> > +
> >   #endif /* __RISCV_KVM_HOST_H__ */
> > diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
> > index 3e0c7558320d..b56dc1650d2c 100644
> > --- a/arch/riscv/kvm/Makefile
> > +++ b/arch/riscv/kvm/Makefile
> > @@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
> >   kvm-objs := $(common-objs-y)
> >
> >   kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
> > -kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
> > +kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o vcpu_sbi.o
> >
> >   obj-$(CONFIG_KVM)   += kvm.o
> > diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
> > index 39469f67b241..0ee4e8943f4f 100644
> > --- a/arch/riscv/kvm/vcpu_exit.c
> > +++ b/arch/riscv/kvm/vcpu_exit.c
> > @@ -594,6 +594,9 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
> >   (vcpu->arch.guest_context.hstatus & HSTATUS_STL))
> >   ret = stage2_page_fault(vcpu, run, scause, stval);
> >   break;
> > + case EXC_SUPERVISOR_SYSCALL:
> > + if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
> > + ret = kvm_riscv_vcpu_sbi_ecall(vcpu);
>
> implicit fall-through

Okay, I will add break here.
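For reference, the fixed case would look roughly like this (a sketch of the
change described above, with the missing break added; not the committed hunk):

	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_sbi_ecall(vcpu);
		break;
	default:
		break;
	};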

>
> >   default:
> >   break;
> >   };
> > diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
> > new file mode 100644
> > index ..b415b8b54bb1
> > --- /dev/null
> > +++ b/arch/riscv/kvm/vcpu_sbi.c
> > @@ -0,0 +1,104 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/**
> > + * Copyright (c) 2019 Western Digital Corporation or its affiliates.
> > + *
> > + * Authors:
> > + * Atish Patra 
> > + */
> > +
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +
> > +#define SBI_VERSION_MAJOR  0
> > +#define SBI_VERSION_MINOR  1
> > +
> > +static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, u32 type)
> > +{
> > + int i;
> > + struct kvm_vcpu *tmp;
> > +
> > + kvm_for_each_vcpu(i, tmp, vcpu->kvm)
> > + tmp->arch.power_off = true;
> > + kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
> > +
> > + memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
> > + vcpu->run->system_event.type = type;
> > + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
>
> Is there a particular reason this has to be implemented in kernel space?

It's not implemented in kernel space. We forward it to user space using
the exit reason KVM_EXIT_SYSTEM_EVENT. This exit reason is
arch-independent, and both QEMU and KVMTOOL already implement it in an
arch-independent way.

> It's not performance critical, and stopping all vcpus is something user
> space should be able to do as well, no?

Yes, it's not performance critical, and the actual handling is done in user space.
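For illustration, a minimal user-space run-loop fragment that consumes this
event could look like the sketch below. This is not QEMU or kvmtool source;
apart from the KVM_* constants and the struct kvm_run fields, every name here
is an assumption.

/*
 * Hypothetical VMM run-loop fragment: the guest's SBI_SHUTDOWN surfaces in
 * user space as the arch-independent KVM_EXIT_SYSTEM_EVENT exit reason.
 */
#include <linux/kvm.h>
#include <stdbool.h>
#include <sys/ioctl.h>

/* 'run' is the kvm_run area mmap()ed from the VCPU fd; returns false once
 * the VM should be torn down. */
static bool vmm_handle_exit(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return false;

	switch (run->exit_reason) {
	case KVM_EXIT_SYSTEM_EVENT:
		if (run->system_event.type == KVM_SYSTEM_EVENT_SHUTDOWN)
			return false;	/* stop all VCPU threads, free the VM */
		return true;
	default:
		return true;
	}
}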

>
> > +}
> > +
> > +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu)
> > +{
> > + int i, ret = 1;
> > + u64 next_cycle;
> > + struct kvm_vcpu *rvcpu;
> > + bool next_sepc = true;
> > + ulong hmask, ut_scause = 0;
> > + struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> > +
> > + if (!cp)
> > + return -EINVAL;
> > +
> > + switch (cp->a7) {
> > + case SBI_SET_TIMER:
> > +#if __riscv_xlen == 32
> > + next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
> > +#else
> > + next_cycle = (u64)cp->a0;
> > +#endif
> > + kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
> > + break;
> > + case SBI_CLEAR_IPI:
> > +   

Re: [PATCH v7 18/21] RISC-V: KVM: Add SBI v0.1 support

2019-09-23 Thread Alexander Graf



On 04.09.19 18:16, Anup Patel wrote:

From: Atish Patra 

The KVM host kernel running in HS-mode needs to handle SBI calls coming
from the guest kernel running in VS-mode.

This patch adds SBI v0.1 support in KVM RISC-V. All the SBI calls are
implemented; remote TLB flushes are currently approximated by a full
TLB flush, which will be optimized in the future.

Signed-off-by: Atish Patra 
Signed-off-by: Anup Patel 
Acked-by: Paolo Bonzini 
Reviewed-by: Paolo Bonzini 
---
  arch/riscv/include/asm/kvm_host.h |   2 +
  arch/riscv/kvm/Makefile   |   2 +-
  arch/riscv/kvm/vcpu_exit.c|   3 +
  arch/riscv/kvm/vcpu_sbi.c | 104 ++
  4 files changed, 110 insertions(+), 1 deletion(-)
  create mode 100644 arch/riscv/kvm/vcpu_sbi.c

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 928c67828b1b..269bfa5641b1 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -250,4 +250,6 @@ bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
  void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
  void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
  
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu);
+
  #endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 3e0c7558320d..b56dc1650d2c 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
  kvm-objs := $(common-objs-y)
  
  kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
-kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
+kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o vcpu_sbi.o
  
  obj-$(CONFIG_KVM)	+= kvm.o

diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 39469f67b241..0ee4e8943f4f 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -594,6 +594,9 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
(vcpu->arch.guest_context.hstatus & HSTATUS_STL))
ret = stage2_page_fault(vcpu, run, scause, stval);
break;
+   case EXC_SUPERVISOR_SYSCALL:
+   if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+   ret = kvm_riscv_vcpu_sbi_ecall(vcpu);


implicit fall-through


default:
break;
};
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
new file mode 100644
index ..b415b8b54bb1
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define SBI_VERSION_MAJOR  0
+#define SBI_VERSION_MINOR  1
+
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, u32 type)
+{
+   int i;
+   struct kvm_vcpu *tmp;
+
+   kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+   tmp->arch.power_off = true;
+   kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+   memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+   vcpu->run->system_event.type = type;
+   vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;


Is there a particular reason this has to be implemented in kernel space?
It's not performance critical, and stopping all vcpus is something user
space should be able to do as well, no?



+}
+
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu)
+{
+   int i, ret = 1;
+   u64 next_cycle;
+   struct kvm_vcpu *rvcpu;
+   bool next_sepc = true;
+   ulong hmask, ut_scause = 0;
+   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
+   if (!cp)
+   return -EINVAL;
+
+   switch (cp->a7) {
+   case SBI_SET_TIMER:
+#if __riscv_xlen == 32
+   next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+   next_cycle = (u64)cp->a0;
+#endif
+   kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+   break;
+   case SBI_CLEAR_IPI:
+   kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_SOFT);
+   break;
+   case SBI_SEND_IPI:
+   hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+  &ut_scause);
+   if (ut_scause) {
+   kvm_riscv_vcpu_trap_redirect(vcpu, ut_scause,
+cp->a0);
+   next_sepc = false;
+   } else {
+   for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+   rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+   kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_S_SOFT);
+   }
+   }
+   break;
+   case SBI_SHUTDOWN:
+

Re: [PATCH v7 18/21] RISC-V: KVM: Add SBI v0.1 support

2019-09-05 Thread Andreas Schwab
On Sep 04 2019, Anup Patel  wrote:

> From: Atish Patra 
>
> The KVM host kernel running in HS-mode needs to handle SBI calls coming
> from the guest kernel running in VS-mode.
>
> This patch adds SBI v0.1 support in KVM RISC-V. All the SBI calls are
> implemented; remote TLB flushes are currently approximated by a full
> TLB flush, which will be optimized in the future.

Note that this conflicts with
https://patchwork.kernel.org/patch/11107221/ which removes 
from .  You should probably include that header
explicitly in arch/riscv/kvm/vcpu_sbi.c.
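For illustration only: assuming the header in question is <asm/sbi.h> (it
declares sbi_remote_fence_i(), which vcpu_sbi.c calls), the explicit include
would be a one-line addition:

/* arch/riscv/kvm/vcpu_sbi.c: include the SBI declarations directly instead
 * of relying on an indirect include (assumed header name). */
#include <asm/sbi.h>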

Andreas.

-- 
Andreas Schwab, SUSE Labs, sch...@suse.de
GPG Key fingerprint = 0196 BAD8 1CE9 1970 F4BE  1748 E4D4 88E3 0EEA B9D7
"And now for something completely different."


[PATCH v7 18/21] RISC-V: KVM: Add SBI v0.1 support

2019-09-04 Thread Anup Patel
From: Atish Patra 

The KVM host kernel running in HS-mode needs to handle SBI calls coming
from the guest kernel running in VS-mode.

This patch adds SBI v0.1 support in KVM RISC-V. All the SBI calls are
implemented; remote TLB flushes are currently approximated by a full
TLB flush, which will be optimized in the future.

Signed-off-by: Atish Patra 
Signed-off-by: Anup Patel 
Acked-by: Paolo Bonzini 
Reviewed-by: Paolo Bonzini 
---
 arch/riscv/include/asm/kvm_host.h |   2 +
 arch/riscv/kvm/Makefile   |   2 +-
 arch/riscv/kvm/vcpu_exit.c|   3 +
 arch/riscv/kvm/vcpu_sbi.c | 104 ++
 4 files changed, 110 insertions(+), 1 deletion(-)
 create mode 100644 arch/riscv/kvm/vcpu_sbi.c

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 928c67828b1b..269bfa5641b1 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -250,4 +250,6 @@ bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu);
+
 #endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 3e0c7558320d..b56dc1650d2c 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
 kvm-objs := $(common-objs-y)
 
 kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
-kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
+kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o vcpu_sbi.o
 
 obj-$(CONFIG_KVM)  += kvm.o
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 39469f67b241..0ee4e8943f4f 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -594,6 +594,9 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
(vcpu->arch.guest_context.hstatus & HSTATUS_STL))
ret = stage2_page_fault(vcpu, run, scause, stval);
break;
+   case EXC_SUPERVISOR_SYSCALL:
+   if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+   ret = kvm_riscv_vcpu_sbi_ecall(vcpu);
default:
break;
};
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
new file mode 100644
index ..b415b8b54bb1
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define SBI_VERSION_MAJOR  0
+#define SBI_VERSION_MINOR  1
+
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, u32 type)
+{
+   int i;
+   struct kvm_vcpu *tmp;
+
+   kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+   tmp->arch.power_off = true;
+   kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+   memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+   vcpu->run->system_event.type = type;
+   vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu)
+{
+   int i, ret = 1;
+   u64 next_cycle;
+   struct kvm_vcpu *rvcpu;
+   bool next_sepc = true;
+   ulong hmask, ut_scause = 0;
+   struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
+   if (!cp)
+   return -EINVAL;
+
+   switch (cp->a7) {
+   case SBI_SET_TIMER:
+#if __riscv_xlen == 32
+   next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+   next_cycle = (u64)cp->a0;
+#endif
+   kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+   break;
+   case SBI_CLEAR_IPI:
+   kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_SOFT);
+   break;
+   case SBI_SEND_IPI:
+   hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+  &ut_scause);
+   if (ut_scause) {
+   kvm_riscv_vcpu_trap_redirect(vcpu, ut_scause,
+cp->a0);
+   next_sepc = false;
+   } else {
+   for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+   rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+   kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_S_SOFT);
+   }
+   }
+   break;
+   case SBI_SHUTDOWN:
+   kvm_sbi_system_shutdown(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+   ret = 0;
+   break;
+   case SBI_REMOTE_FENCE_I:
+   sbi_remote_fence_i(NULL);
+   break;
+   /*
+* TODO: There should be a way to