Since the host is called from the hyp run loop, we can use that context
to identify calls from the host rather than checking VTTBR_EL2, which
will be used for the host's stage 2 in future.
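
For illustration, the dispatch from the run loop's trap handler ends
up looking roughly like this (a condensed view of the handle_trap()
change below, not the verbatim hunk): anything that reaches it is, by
construction, a call from the host.

        static void handle_trap(struct kvm_vcpu *host_vcpu)
        {
                if (kvm_vcpu_trap_get_class(host_vcpu) == ESR_ELx_EC_HVC64) {
                        unsigned long func_id = smccc_get_function(host_vcpu);

                        /* Stub HVCs occupy the low function IDs. */
                        if (func_id < HVC_STUB_HCALL_NR)
                                handle_stub_hvc(func_id, host_vcpu);
                        else
                                handle_host_hcall(func_id, host_vcpu);
                }
        }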

Moving this to C also allows for more flexibility, e.g. in applying
policies, such as forbidding HVC_RESET_VECTORS, based on the current
state of the hypervisor, and removes the special casing for nVHE in
the exception handler.
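
As a sketch of the kind of policy this makes easy to express (purely
hypothetical; "hyp_initialized" is a made-up flag, not a symbol added
by this patch):

        case HVC_RESET_VECTORS:
                /* Hypothetical policy: refuse a reset once the
                 * hypervisor has been fully initialized. */
                if (hyp_initialized) {
                        vcpu_set_reg(host_vcpu, 0, HVC_STUB_ERR);
                        return;
                }
                break;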

Control over ARM_SMCCC_ARCH_WORKAROUND_2 is made available to the
host, the same as for any other vcpu.
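
The host can then toggle the mitigation through the same SMCCC
interface a guest would use, along the lines of (illustrative only;
the exact call site on the host side varies by kernel version):

        /* Illustrative: state is 1 to enable the mitigation, 0 to
         * disable it; the hyp fast path updates the host vcpu's
         * VCPU_WORKAROUND_FLAGS accordingly. */
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);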

Signed-off-by: Andrew Scull <[email protected]>
---
 arch/arm64/kvm/hyp/hyp-entry.S     | 36 ++-------------------
 arch/arm64/kvm/hyp/nvhe/hyp-init.S | 15 +++++++--
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 50 +++++++++++++++++++++++++++++-
 3 files changed, 65 insertions(+), 36 deletions(-)

diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index a45459d1c135..3113665ce912 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -17,50 +17,20 @@
 
        .text
 
-el1_sync:                              // Guest trapped into EL2
-
+el1_sync:
        mrs     x0, esr_el2
        lsr     x0, x0, #ESR_ELx_EC_SHIFT
        cmp     x0, #ESR_ELx_EC_HVC64
        ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
        b.ne    el1_trap
 
-#ifdef __KVM_NVHE_HYPERVISOR__
-       mrs     x1, vttbr_el2           // If vttbr is valid, the guest
-       cbnz    x1, el1_hvc_guest       // called HVC
-
-       /* Here, we're pretty sure the host called HVC. */
-       ldp     x0, x1, [sp]
-
-       /* Check for a stub HVC call */
-       cmp     x0, #HVC_STUB_HCALL_NR
-       b.hs    el1_trap
-       add     sp, sp, #16
-
-       /*
-        * Compute the idmap address of __kvm_handle_stub_hvc and
-        * jump there. Since we use kimage_voffset, do not use the
-        * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
-        * (by loading it from the constant pool).
-        *
-        * Preserve x0-x4, which may contain stub parameters.
-        */
-       ldr     x5, =__kvm_handle_stub_hvc
-       ldr_l   x6, kimage_voffset
-
-       /* x5 = __pa(x5) */
-       sub     x5, x5, x6
-       br      x5
-#endif /* __KVM_NVHE_HYPERVISOR__ */
-
-el1_hvc_guest:
        /*
         * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
         * The workaround has already been applied on the host,
         * so let's quickly get back to the guest. We don't bother
         * restoring x1, as it can be clobbered anyway.
         */
-       ldr     x1, [sp]                                // Guest's x0
+       ldr     x1, [sp]                                // vcpu's x0
        eor     w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
        cbz     w1, wa_epilogue
 
@@ -77,7 +47,7 @@ alternative_cb_end
        ldr     x0, [x2, #VCPU_WORKAROUND_FLAGS]
 
        // Sanitize the argument and update the guest flags
-       ldr     x1, [sp, #8]                    // Guest's x1
+       ldr     x1, [sp, #8]                    // vcpu's x1
        clz     w1, w1                          // Murphy's device:
        lsr     w1, w1, #5                      // w1 = !!w1 without using
        eor     w1, w1, #1                      // the flags...
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index df2a7904a83b..43e1ee6178d4 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -15,6 +15,9 @@
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
+#include <asm/kvm_asm.h>
+#include <asm/kvm_ptrauth.h>
+
        .text
        .pushsection    .hyp.idmap.text, "ax"
 
@@ -137,6 +140,7 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
        cmp     x0, #HVC_SOFT_RESTART
        b.ne    1f
 
+SYM_INNER_LABEL(__kvm_handle_stub_hvc_soft_restart, SYM_L_GLOBAL)
        /* This is where we're about to jump, staying at EL2 */
        msr     elr_el2, x1
       mov     x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
@@ -146,11 +150,18 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
        mov     x0, x2
        mov     x1, x3
        mov     x2, x4
-       b       reset
+       b       2f
 
 1:     cmp     x0, #HVC_RESET_VECTORS
        b.ne    1f
-reset:
+
+SYM_INNER_LABEL(__kvm_handle_stub_hvc_reset_vectors, SYM_L_GLOBAL)
+       /* Restore host's ptrauth, sp_el0 and callee saved regs */
+       ptrauth_switch_to_guest x5, x6, x7, x8
+       restore_sp_el0 x5, x6
+       restore_callee_saved_regs x5
+
+2:
        /*
         * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
         * case we coming via HVC_SOFT_RESTART.
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 213977634601..d013586e3a03 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -13,6 +13,51 @@
 
 #include <kvm/arm_hypercalls.h>
 
+typedef unsigned long (*stub_hvc_handler_t)
+       (unsigned long, unsigned long, unsigned long, unsigned long,
+        unsigned long, struct kvm_cpu_context *);
+
+extern char __kvm_handle_stub_hvc_soft_restart[];
+extern char __kvm_handle_stub_hvc_reset_vectors[];
+
+static void handle_stub_hvc(unsigned long func_id, struct kvm_vcpu *host_vcpu)
+{
+       char *stub_hvc_handler_kern_va;
+       __noreturn stub_hvc_handler_t stub_hvc_handler;
+
+       /*
+        * The handlers of the supported stub HVCs disable the MMU so they must
+        * be called in the idmap. We compute the idmap address by subtracting
+        * kimage_voffset from the kernel VA handler.
+        */
+       switch (func_id) {
+       case HVC_SOFT_RESTART:
+               asm volatile("ldr %0, =%1"
+                            : "=r" (stub_hvc_handler_kern_va)
+                            : "S" (__kvm_handle_stub_hvc_soft_restart));
+               break;
+       case HVC_RESET_VECTORS:
+               asm volatile("ldr %0, =%1"
+                            : "=r" (stub_hvc_handler_kern_va)
+                            : "S" (__kvm_handle_stub_hvc_reset_vectors));
+               break;
+       default:
+               vcpu_set_reg(host_vcpu, 0, HVC_STUB_ERR);
+               return;
+       }
+
+       stub_hvc_handler = (__noreturn stub_hvc_handler_t)
+               (stub_hvc_handler_kern_va - kimage_voffset);
+
+       /* Preserve x0-x4, which may contain stub parameters. */
+       stub_hvc_handler(func_id,
+                        vcpu_get_reg(host_vcpu, 1),
+                        vcpu_get_reg(host_vcpu, 2),
+                        vcpu_get_reg(host_vcpu, 3),
+                        vcpu_get_reg(host_vcpu, 4),
+                        &host_vcpu->arch.ctxt);
+}
+
 static void handle_host_hcall(unsigned long func_id, struct kvm_vcpu *host_vcpu)
 {
        unsigned long ret = 0;
@@ -105,7 +150,10 @@ static void handle_trap(struct kvm_vcpu *host_vcpu) {
        if (kvm_vcpu_trap_get_class(host_vcpu) == ESR_ELx_EC_HVC64) {
                unsigned long func_id = smccc_get_function(host_vcpu);
 
-               handle_host_hcall(func_id, host_vcpu);
+               if (func_id < HVC_STUB_HCALL_NR)
+                       handle_stub_hvc(func_id, host_vcpu);
+               else
+                       handle_host_hcall(func_id, host_vcpu);
        }
 
        /* Other traps are ignored. */
-- 
2.27.0.389.gc38d7665816-goog
