Introduce vm_event_is_enabled() to check whether vm_event is enabled,
and make the check conditional upon CONFIG_VM_EVENT. This lets the
compiler eliminate dead code, such as hvm_monitor_io() and the other
monitor hooks, when VM_EVENT=n. The in-place assertions on
arch.vm_event then become redundant and are removed.
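
A minimal standalone sketch of the intended DCE effect (illustrative
only; the types here and hvm_monitor_hook()/handle_exit() are
simplified stand-ins, not part of the patch):

    #include <stdbool.h>
    #include <stddef.h>

    struct vm_event_st;
    struct vcpu {
        struct {
            struct vm_event_st *vm_event;
        } arch;
    };

    static inline bool vm_event_is_enabled(const struct vcpu *v)
    {
    #ifdef CONFIG_VM_EVENT
        return v->arch.vm_event != NULL;
    #else
        return false;            /* compile-time constant */
    #endif
    }

    void hvm_monitor_hook(void); /* stand-in for hvm_monitor_io() etc. */

    void handle_exit(struct vcpu *v)
    {
        /*
         * With VM_EVENT=n the condition folds to false, so this call
         * (and any monitor-only code it guards) is discarded as dead
         * code at compile time.
         */
        if ( vm_event_is_enabled(v) )
            hvm_monitor_hook();
    }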

Signed-off-by: Penny Zheng <penny.zh...@amd.com>
---
 xen/arch/x86/hvm/emulate.c          |  6 ++---
 xen/arch/x86/hvm/hvm.c              | 41 +++++++++++++----------------
 xen/arch/x86/hvm/svm/intr.c         |  2 +-
 xen/arch/x86/hvm/vmx/intr.c         |  2 +-
 xen/arch/x86/include/asm/vm_event.h |  9 +++++++
 5 files changed, 33 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2af4f30359..75567db403 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -105,7 +105,7 @@ static int set_context_data(void *buffer, unsigned int size)
 {
     struct vcpu *curr = current;
 
-    if ( curr->arch.vm_event )
+    if ( vm_event_is_enabled(curr) )
     {
         unsigned int safe_size =
             min(size, curr->arch.vm_event->emul.read.size);
@@ -771,7 +771,7 @@ static void *hvmemul_map_linear_addr(
             ASSERT(p2mt == p2m_ram_logdirty || !p2m_is_readonly(p2mt));
         }
 
-        if ( unlikely(curr->arch.vm_event) &&
+        if ( unlikely(vm_event_is_enabled(curr)) &&
              curr->arch.vm_event->send_event &&
              hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
         {
@@ -1870,7 +1870,7 @@ static int hvmemul_rep_outs_set_context(
     int rc = X86EMUL_OKAY;
 
     ASSERT(bytes_per_rep <= 4);
-    if ( !ev )
+    if ( !vm_event_is_enabled(current) )
         return X86EMUL_UNHANDLEABLE;
 
     ptr = ev->emul.read.data;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 23bd7f078a..e3abd2849a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -532,7 +532,7 @@ void hvm_do_resume(struct vcpu *v)
     if ( !vcpu_ioreq_handle_completion(v) )
         return;
 
-    if ( unlikely(v->arch.vm_event) )
+    if ( unlikely(vm_event_is_enabled(v)) )
         hvm_vm_event_do_resume(v);
 
     /* Inject pending hw/sw event */
@@ -546,11 +546,12 @@ void hvm_do_resume(struct vcpu *v)
         v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
     }
 
-    if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
+    if ( unlikely(vm_event_is_enabled(v)) &&
+         v->arch.monitor.next_interrupt_enabled )
     {
         struct x86_event info;
 
-        if ( hvm_get_pending_event(v, &info) )
+        if ( hvm_get_pending_event(v, &info) && vm_event_is_enabled(v) )
         {
             hvm_monitor_interrupt(info.vector, info.type, info.error_code,
                                   info.cr2);
@@ -2088,7 +2089,7 @@ int hvm_handle_xsetbv(u32 index, u64 new_bv)
 {
     int rc;
 
-    if ( index == 0 )
+    if ( index == 0 && vm_event_is_enabled(current) )
         hvm_monitor_crX(XCR0, new_bv, current->arch.xcr0);
 
     rc = x86emul_write_xcr(index, new_bv, NULL);
@@ -2337,9 +2338,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
     if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                                monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) )
     {
-        ASSERT(v->arch.vm_event);
-
-        if ( hvm_monitor_crX(CR0, value, old_value) )
+        if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR0, value, old_value) )
         {
             /* The actual write will occur in hvm_do_resume(), if permitted. */
             v->arch.vm_event->write_data.do_write.cr0 = 1;
@@ -2462,9 +2461,8 @@ int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
     if ( may_defer && unlikely(currd->arch.monitor.write_ctrlreg_enabled &
                                monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
     {
-        ASSERT(curr->arch.vm_event);
-
-        if ( hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
+        if ( vm_event_is_enabled(curr) &&
+             hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
         {
             /* The actual write will occur in hvm_do_resume(), if permitted. */
             curr->arch.vm_event->write_data.do_write.cr3 = 1;
@@ -2544,9 +2542,7 @@ int hvm_set_cr4(unsigned long value, bool may_defer)
     if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                                monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) )
     {
-        ASSERT(v->arch.vm_event);
-
-        if ( hvm_monitor_crX(CR4, value, old_cr) )
+        if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR4, value, old_cr) )
         {
             /* The actual write will occur in hvm_do_resume(), if permitted. */
             v->arch.vm_event->write_data.do_write.cr4 = 1;
@@ -3407,7 +3403,7 @@ static enum hvm_translation_result __hvm_copy(
             return HVMTRANS_bad_gfn_to_mfn;
         }
 
-        if ( unlikely(v->arch.vm_event) &&
+        if ( unlikely(vm_event_is_enabled(v)) &&
              (flags & HVMCOPY_linear) &&
              v->arch.vm_event->send_event &&
              hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
@@ -3538,6 +3534,7 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len)
     struct vcpu *curr = current;
     unsigned int leaf = regs->eax, subleaf = regs->ecx;
     struct cpuid_leaf res;
+    int ret = 0;
 
     if ( curr->arch.msrs->misc_features_enables.cpuid_faulting &&
          hvm_get_cpl(curr) > 0 )
@@ -3554,7 +3551,10 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len)
     regs->rcx = res.c;
     regs->rdx = res.d;
 
-    return hvm_monitor_cpuid(inst_len, leaf, subleaf);
+    if ( vm_event_is_enabled(curr) )
+        ret = hvm_monitor_cpuid(inst_len, leaf, subleaf);
+
+    return ret;
 }
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
@@ -3694,9 +3694,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
         if ( ret != X86EMUL_OKAY )
             return ret;
 
-        ASSERT(v->arch.vm_event);
-
-        if ( hvm_monitor_msr(msr, msr_content, msr_old_content) )
+        if ( vm_event_is_enabled(v) &&
+             hvm_monitor_msr(msr, msr_content, msr_old_content) )
         {
             /* The actual write will occur in hvm_do_resume(), if permitted. */
             v->arch.vm_event->write_data.do_write.msr = 1;
@@ -3854,12 +3853,10 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
 
-    if ( currd->arch.monitor.descriptor_access_enabled )
-    {
-        ASSERT(curr->arch.vm_event);
+    if ( currd->arch.monitor.descriptor_access_enabled &&
+         vm_event_is_enabled(curr) )
         hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
                                       descriptor, is_write);
-    }
     else if ( !hvm_emulate_one_insn(is_sysdesc_access, "sysdesc access") )
         domain_crash(currd);
 
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 46186a1102..557ebc98d8 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -130,7 +130,7 @@ void asmlinkage svm_intr_assist(void)
     enum hvm_intblk intblk;
 
     /* Block event injection while handling a sync vm_event. */
-    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+    if ( unlikely(vm_event_is_enabled(v)) && v->arch.vm_event->sync_event )
         return;
 
     /* Crank the handle on interrupt state. */
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index b35dc8c586..a8ced95871 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -239,7 +239,7 @@ void asmlinkage vmx_intr_assist(void)
     }
 
     /* Block event injection while handling a sync vm_event. */
-    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+    if ( unlikely(vm_event_is_enabled(v)) && v->arch.vm_event->sync_event )
         return;
 
 #ifdef CONFIG_MEM_SHARING
diff --git a/xen/arch/x86/include/asm/vm_event.h b/xen/arch/x86/include/asm/vm_event.h
index 46e77ed6d9..446d02c7d5 100644
--- a/xen/arch/x86/include/asm/vm_event.h
+++ b/xen/arch/x86/include/asm/vm_event.h
@@ -45,4 +45,13 @@ void vm_event_sync_event(struct vcpu *v, bool value);
 
 void vm_event_reset_vmtrace(struct vcpu *v);
 
+static inline bool vm_event_is_enabled(struct vcpu *v)
+{
+#ifdef CONFIG_VM_EVENT
+    return v->arch.vm_event != NULL;
+#else
+    return false;
+#endif
+}
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
-- 
2.34.1

