Feature memory access is based on the vm event subsystem, and it could be disabled in the future. So a few switch-blocks in do_altp2m_op(), like HVMOP_altp2m_set_mem_access, etc., need a vm_event_is_enabled() condition check to pass compilation when ALTP2M=y and VM_EVENT=n (and hence MEM_ACCESS=n). Function p2m_mem_access_check() still needs a stub when VM_EVENT=n to pass compilation. Although the local variable "req_ptr" still remains NULL throughout its lifetime, with the change of the NULL assignment we would face a runtime undefined behavior error, but only when CONFIG_UBSAN is on. So we strengthen the condition check by adding vm_event_is_enabled() for this special case.
Signed-off-by: Penny Zheng <[email protected]> --- v1 -> v2: - a comment next to the excessive condition - use vm_event_is_enabled() instead - avoid heavy churn by using the inverted condition plus break --- xen/arch/x86/hvm/hvm.c | 24 +++++++++++++++++++++++- xen/arch/x86/include/asm/mem_access.h | 10 ++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index aa14101241..0103f5c6ba 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -2080,7 +2080,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, #endif } - if ( req_ptr ) + /* + * Excessive condition is to avoid runtime undefined error only + * when CONFIG_UBSAN=y + */ + if ( req_ptr && vm_event_is_enabled(curr) ) { if ( monitor_traps(curr, sync, req_ptr) < 0 ) rc = 0; @@ -4802,6 +4806,12 @@ static int do_altp2m_op( break; case HVMOP_altp2m_set_mem_access: + if ( !vm_event_is_enabled(current) ) + { + rc = -EOPNOTSUPP; + break; + } + if ( a.u.mem_access.pad ) rc = -EINVAL; else @@ -4811,6 +4821,12 @@ static int do_altp2m_op( break; case HVMOP_altp2m_set_mem_access_multi: + if ( !vm_event_is_enabled(current) ) + { + rc = -EOPNOTSUPP; + break; + } + if ( a.u.set_mem_access_multi.pad || a.u.set_mem_access_multi.opaque > a.u.set_mem_access_multi.nr ) { @@ -4842,6 +4858,12 @@ static int do_altp2m_op( break; case HVMOP_altp2m_get_mem_access: + if ( !vm_event_is_enabled(current) ) + { + rc = -EOPNOTSUPP; + break; + } + if ( a.u.mem_access.pad ) rc = -EINVAL; else diff --git a/xen/arch/x86/include/asm/mem_access.h b/xen/arch/x86/include/asm/mem_access.h index 257ed33de1..790bed81e8 100644 --- a/xen/arch/x86/include/asm/mem_access.h +++ b/xen/arch/x86/include/asm/mem_access.h @@ -14,6 +14,7 @@ #ifndef __ASM_X86_MEM_ACCESS_H__ #define __ASM_X86_MEM_ACCESS_H__ +#ifdef CONFIG_VM_EVENT /* * Setup vm_event request based on the access (gla is -1ull if not available). * Handles the rw2rx conversion. 
Boolean return value indicates if event type @@ -25,6 +26,15 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, struct npfec npfec, struct vm_event_st **req_ptr); +#else +static inline bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, + struct npfec npfec, + struct vm_event_st **req_ptr) +{ + *req_ptr = NULL; + return false; +} +#endif /* CONFIG_VM_EVENT */ /* Check for emulation and mark vcpu for skipping one instruction * upon rescheduling if required. */ -- 2.34.1
