Since the host is called from the hyp run loop, we can use that context
to identify calls from the host rather than checking VTTBR_EL2, which
will be used for the host's stage 2 in future.
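
With the host entering EL2 through the hyp run loop, the C trap
handler can dispatch purely on the hypercall function ID. Condensed
from the hyp-main.c hunk below (the ESR check and panic path are
omitted here for brevity):

	void handle_trap(struct kvm_cpu_context *host_ctxt)
	{
		unsigned long func_id = host_ctxt->regs.regs[0];

		if (func_id < HVC_STUB_HCALL_NR)
			handle_stub_hvc(func_id, host_ctxt);
		else
			handle_host_hcall(func_id, host_ctxt);
	}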

Moving this to C also allows for more flexibility, e.g. in applying
policies, such as forbidding HVC_RESET_VECTORS, based on the current
state of the hypervisor, and removes the special casing for nVHE in
the exception handler.
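
As a sketch of the sort of policy this makes possible (hypothetical;
the hyp_fully_initialized flag and this check are not part of this
patch), handle_stub_hvc() could refuse HVC_RESET_VECTORS once the
hypervisor no longer expects to be torn down:

	/* Hypothetical flag, set once the hypervisor is committed. */
	static bool hyp_fully_initialized;

	static void handle_stub_hvc(unsigned long func_id,
				    struct kvm_cpu_context *host_ctxt)
	{
		if (func_id == HVC_RESET_VECTORS && hyp_fully_initialized) {
			/* Reject the stub call with the usual error code. */
			host_ctxt->regs.regs[0] = HVC_STUB_ERR;
			return;
		}
		/* ... existing handling as in the hunk below ... */
	}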

Control over ARM_SMCCC_ARCH_WORKAROUND_2 is made available to the host,
the same as for any other vcpu.
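
For reference, the host reaches this handling the same way a guest
would, via the SMCCC 1.1 HVC conduit; roughly (illustrative only, the
helper name is made up and this is not part of the patch):

	#include <linux/arm-smccc.h>

	static void arch_workaround_2_set(unsigned long state)
	{
		/*
		 * The call lands in the hyp run loop and is handled like
		 * any other vcpu's ARCH_WORKAROUND_2 request.
		 */
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
	}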

Signed-off-by: Andrew Scull <[email protected]>
---
 arch/arm64/kvm/hyp/hyp-entry.S     |  1 -
 arch/arm64/kvm/hyp/nvhe/host.S     | 26 +--------------
 arch/arm64/kvm/hyp/nvhe/hyp-init.S | 13 ++++++--
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 51 +++++++++++++++++++++++++++++-
 4 files changed, 62 insertions(+), 29 deletions(-)

diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index b7a1ea221f6e..db54f84003ec 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -24,7 +24,6 @@ el1_sync:				// Guest trapped into EL2
        ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
        b.ne    el1_trap
 
-el1_hvc_guest:
        /*
         * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
         * The workaround has already been applied on the host,
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 40620c1c87b8..68d425b7bd62 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -95,31 +95,7 @@ SYM_FUNC_END(__hyp_do_panic)
 
 .macro valid_host_el1_sync_vect
        .align 7
-       stp     x0, x1, [sp, #-16]!
-       mrs     x0, esr_el2
-       lsr     x0, x0, #ESR_ELx_EC_SHIFT
-       cmp     x0, #ESR_ELx_EC_HVC64
-       ldp     x0, x1, [sp], #16
-       b.ne    __host_exit
-
-       /* Check for a stub HVC call */
-       cmp     x0, #HVC_STUB_HCALL_NR
-       b.hs    __host_exit
-
-       /*
-        * Compute the idmap address of __kvm_handle_stub_hvc and
-        * jump there. Since we use kimage_voffset, do not use the
-        * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
-        * (by loading it from the constant pool).
-        *
-        * Preserve x0-x4, which may contain stub parameters.
-        */
-       ldr     x5, =__kvm_handle_stub_hvc
-       ldr_l   x6, kimage_voffset
-
-       /* x5 = __pa(x5) */
-       sub     x5, x5, x6
-       br      x5
+       b       __host_exit
 .endm
 
 .macro invalid_host_el2_vect
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 47224dc62c51..40b5aa95c194 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -16,6 +16,9 @@
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
+#include <asm/kvm_asm.h>
+#include <asm/kvm_ptrauth.h>
+
        .text
        .pushsection    .hyp.idmap.text, "ax"
 
@@ -138,6 +141,7 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
        cmp     x0, #HVC_SOFT_RESTART
        b.ne    1f
 
+SYM_INNER_LABEL(__kvm_handle_stub_hvc_soft_restart, SYM_L_GLOBAL)
        /* This is where we're about to jump, staying at EL2 */
        msr     elr_el2, x1
        mov     x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
@@ -150,8 +154,13 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
        b       reset
 
 1:     cmp     x0, #HVC_RESET_VECTORS
-       b.ne    1f
+       b.ne    3f
+       b       2f
+
+SYM_INNER_LABEL(__kvm_handle_stub_hvc_reset_vectors, SYM_L_GLOBAL)
+       restore_callee_saved_regs x5
 
+2:
        /*
         * Set the HVC_RESET_VECTORS return code before entering the common
         * path so that we do not clobber x0-x2 in case we are coming via
@@ -172,7 +181,7 @@ reset:
        msr     vbar_el2, x5
        eret
 
-1:     /* Bad stub call */
+3:     /* Bad stub call */
        mov_q   x0, HVC_STUB_ERR
        eret
 
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 78d7afcefbb8..302333ea96eb 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -14,6 +14,52 @@
 
 #include <kvm/arm_hypercalls.h>
 
+typedef __noreturn unsigned long (*stub_hvc_handler_t)
+       (unsigned long, unsigned long, unsigned long, unsigned long,
+        unsigned long, struct kvm_cpu_context *);
+
+extern char __kvm_handle_stub_hvc_soft_restart[];
+extern char __kvm_handle_stub_hvc_reset_vectors[];
+
+static void handle_stub_hvc(unsigned long func_id,
+                           struct kvm_cpu_context *host_ctxt)
+{
+       char *stub_hvc_handler_kern_va;
+       stub_hvc_handler_t stub_hvc_handler;
+
+       /*
+        * The handlers of the supported stub HVCs disable the MMU so they must
+        * be called in the idmap. We compute the idmap address by subtracting
+        * kimage_voffset from the kernel VA handler.
+        */
+       switch (func_id) {
+       case HVC_SOFT_RESTART:
+               asm volatile("ldr %0, =%1"
+                            : "=r" (stub_hvc_handler_kern_va)
+                            : "S" (__kvm_handle_stub_hvc_soft_restart));
+               break;
+       case HVC_RESET_VECTORS:
+               asm volatile("ldr %0, =%1"
+                            : "=r" (stub_hvc_handler_kern_va)
+                            : "S" (__kvm_handle_stub_hvc_reset_vectors));
+               break;
+       default:
+               host_ctxt->regs.regs[0] = HVC_STUB_ERR;
+               return;
+       }
+
+       stub_hvc_handler = (stub_hvc_handler_t)
+               (stub_hvc_handler_kern_va - kimage_voffset);
+
+       /* Preserve x0-x4, which may contain stub parameters. */
+       stub_hvc_handler(func_id,
+                        host_ctxt->regs.regs[1],
+                        host_ctxt->regs.regs[2],
+                        host_ctxt->regs.regs[3],
+                        host_ctxt->regs.regs[4],
+                        host_ctxt);
+}
+
 static void handle_host_hcall(unsigned long func_id,
                              struct kvm_cpu_context *host_ctxt)
 {
@@ -118,5 +164,8 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) {
                hyp_panic();
 
        func_id = host_ctxt->regs.regs[0];
-       handle_host_hcall(func_id, host_ctxt);
+       if (func_id < HVC_STUB_HCALL_NR)
+               handle_stub_hvc(func_id, host_ctxt);
+       else
+               handle_host_hcall(func_id, host_ctxt);
 }
-- 
2.28.0.220.ged08abb693-goog
