Make the hypervisor reset either the whole tracing buffer or a specific ring-buffer, on remotes/hypervisor/trace or per_cpu/<cpu>/trace write access.
Signed-off-by: Vincent Donnefort <[email protected]>

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 375607c67285..a28b072125ec 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -94,6 +94,7 @@ enum __kvm_host_smccc_func {
	__KVM_HOST_SMCCC_FUNC___tracing_enable,
	__KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
	__KVM_HOST_SMCCC_FUNC___tracing_update_clock,
+	__KVM_HOST_SMCCC_FUNC___tracing_reset,
 };

 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trace.h b/arch/arm64/kvm/hyp/include/nvhe/trace.h
index fd641e1b1c23..44912869d184 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/trace.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/trace.h
@@ -12,6 +12,7 @@ void __tracing_unload(void);
 int __tracing_enable(bool enable);
 int __tracing_swap_reader(unsigned int cpu);
 void __tracing_update_clock(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc);
+int __tracing_reset(unsigned int cpu);
 #else
 static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
 static inline void tracing_commit_entry(void) { }
@@ -21,5 +22,6 @@ static inline void __tracing_unload(void) { }
 static inline int __tracing_enable(bool enable) { return -ENODEV; }
 static inline int __tracing_swap_reader(unsigned int cpu) { return -ENODEV; }
 static inline void __tracing_update_clock(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc) { }
+static inline int __tracing_reset(unsigned int cpu) { return -ENODEV; }
 #endif
 #endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 45b8f70828de..f92e82cbfcb4 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -625,6 +625,13 @@ static void handle___tracing_update_clock(struct kvm_cpu_context *host_ctxt)
	cpu_reg(host_ctxt, 1) = 0;
 }

+static void handle___tracing_reset(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
+
+	cpu_reg(host_ctxt, 1) = __tracing_reset(cpu);
+}
+
 typedef void (*hcall_t)(struct kvm_cpu_context *);

 #define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -671,6 +678,7 @@ static const hcall_t host_hcall[] = {
	HANDLE_FUNC(__tracing_enable),
	HANDLE_FUNC(__tracing_swap_reader),
	HANDLE_FUNC(__tracing_update_clock),
+	HANDLE_FUNC(__tracing_reset),
 };

 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c
index 97e9f6c1a52c..93475cc36640 100644
--- a/arch/arm64/kvm/hyp/nvhe/trace.c
+++ b/arch/arm64/kvm/hyp/nvhe/trace.c
@@ -287,3 +287,20 @@ void __tracing_update_clock(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc)
	/* ...we can now override the old one and swap. */
	trace_clock_update(mult, shift, epoch_ns, epoch_cyc);
 }
+
+int __tracing_reset(unsigned int cpu)
+{
+	int ret = -ENODEV;
+
+	if (cpu >= hyp_nr_cpus)
+		return -EINVAL;
+
+	hyp_spin_lock(&trace_buffer.lock);
+
+	if (hyp_trace_buffer_loaded(&trace_buffer))
+		ret = simple_ring_buffer_reset(per_cpu_ptr(trace_buffer.simple_rbs, cpu));
+
+	hyp_spin_unlock(&trace_buffer.lock);
+
+	return ret;
+}
diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c
index 1e5fc27f0e9d..09bc192e3514 100644
--- a/arch/arm64/kvm/hyp_trace.c
+++ b/arch/arm64/kvm/hyp_trace.c
@@ -313,7 +313,7 @@ static int hyp_trace_swap_reader_page(unsigned int cpu, void *priv)

 static int hyp_trace_reset(unsigned int cpu, void *priv)
 {
-	return 0;
+	return kvm_call_hyp_nvhe(__tracing_reset, cpu);
 }

 static int hyp_trace_enable_event(unsigned short id, bool enable, void *priv)
-- 
2.52.0.107.ga0afd4fd5b-goog
