Housekeeping: Don't forget to account for the fast path. This still fits into the interrupt vector entry.
Signed-off-by: Ralf Ramsauer <[email protected]> --- hypervisor/arch/arm64/asm-defines.c | 3 +++ hypervisor/arch/arm64/entry.S | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/hypervisor/arch/arm64/asm-defines.c b/hypervisor/arch/arm64/asm-defines.c index ce8e224d..1fcffcc3 100644 --- a/hypervisor/arch/arm64/asm-defines.c +++ b/hypervisor/arch/arm64/asm-defines.c @@ -38,6 +38,9 @@ void common(void) DEFINE(CPU_STAT_VMEXITS_TOTAL, LOCAL_CPU_BASE + __builtin_offsetof(struct per_cpu, public.stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL])); + DEFINE(CPU_STAT_VMEXITS_SMCCC, LOCAL_CPU_BASE + + __builtin_offsetof(struct per_cpu, + public.stats[JAILHOUSE_CPU_STAT_VMEXITS_SMCCC])); BLANK(); DEFINE(DCACHE_CLEAN_ASM, DCACHE_CLEAN); diff --git a/hypervisor/arch/arm64/entry.S b/hypervisor/arch/arm64/entry.S index ced5d809..167e29d2 100644 --- a/hypervisor/arch/arm64/entry.S +++ b/hypervisor/arch/arm64/entry.S @@ -26,6 +26,9 @@ vmexits_total: .quad CPU_STAT_VMEXITS_TOTAL +vmexits_smccc: + .quad CPU_STAT_VMEXITS_SMCCC + /* x11 must contain the virt-to-phys offset */ .macro virt2phys, register add \register, \register, x11 @@ -364,6 +367,13 @@ el1_trap: add x0, x0, #4 msr elr_el2, x0 + /* And don't forget to account the SMC exit */ + ldr x0, =vmexits_smccc + ldr x0, [x0] + ldr x1, [x0] + add x1, x1, #1 + str x1, [x0] + /* beam me up, we only need to restore x4 and sp */ ldr x4, [sp, #(2 * 16 + 1 * 8)] add sp, sp, #(16 * 16) -- 2.20.1 -- You received this message because you are subscribed to the Google Groups "Jailhouse" group. To unsubscribe from this group and stop receiving emails from it, send an email to [email protected]. For more options, visit https://groups.google.com/d/optout.
