Dave Voutila <d...@sisu.io> writes:

> Greetings tech@,
>
> I'm looking for testers of the following diff, which addresses
> spurious VMCS corruption on Intel VMX hosts. If you've had guests die
> with errors about failing to vmresume since upgrading to 7.0 or while
> following -current, please try this diff. I've had it running with no
> issues on some hardware of mlarkin's that was showing frequent
> problems, as well as on my own systems.
>

Friendly bump. I've had some testers, but consider this a last call as
I carve this up into the 3-4 diffs I plan to commit.

> If you do, please compile with WITNESS support. I'm temporarily using
> mutex(9) to help identify any possible sleep points while working with
> the VMCS. I may swap back to rwlock(9) before commit, as there's no
> real reason we need to spin; the mutex is just there to leverage
> WITNESS and show if/where we try to acquire a sleepable lock when we
> don't expect to.
>
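(For anyone who hasn't built a WITNESS kernel before: it should just be
a one-line kernel option, roughly as below, then config(8) and build as
usual -- see witness(4) for the details.)

	# added to a copy of sys/arch/amd64/conf/GENERIC.MP
	option	WITNESS		# enable the lock checker
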
> -- AMD Folks
>
> Even if you run AMD SVM hardware, please test and look for any
> regressions. The only change you should see is that all vmm(4)
> messages after probe/attach are sent to syslog rather than directly to
> the console via printf(9).
>
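(With the stock syslog.conf those kernel messages should land in
/var/log/messages, so a "tail -f /var/log/messages" in another terminal
is an easy way to keep an eye on them while testing.)
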
> -- What's changed? This diff is huge :(
>
> The simplest change is bumping the vmclear IPI spin counter to a
> higher value (matching the one used by MP_LOCKDEBUG), as slower
> hardware (like mlarkin's) can spin out when the host is under heavy
> load. This will be committed on its own, but it's worth including in
> this diff for testing.
>
> The major changes address sleep points in vmm(4) on critical paths
> related to VMCS state. Sleeping on any lock, such as during the remote
> vmclear via IPI, may result in the scheduler stealing the process and
> moving it to a free cpu. Before any subsequent vmresume, vmwrite, or
> vmread instruction, the VMCS then needs to be remotely cleared and
> reloaded, otherwise those instructions will fail.
>
> The diff either removes possible sleep points (swapping out printf(9)
> for log(9)) or performs a proper vmclear/vmptrld dance where we can't
> remove the sleep point (e.g. uvm_fault(9), km_alloc(9)).
>
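For anyone skimming the diff, the shape of that dance around an
unavoidable sleep point is roughly the following, lifted from the
vmx_fault_page() hunk below with the error paths trimmed:

	/*
	 * We may sleep, so note which cpu we're on and drop the new
	 * per-vcpu VMCS mutex first.
	 */
	vcpu->vc_last_pcpu = curcpu();
	mtx_leave(&vcpu->vc_vmx_mtx);

	/* Potential sleep point, e.g. uvm_fault(9) or km_alloc(9). */
	ret = uvm_fault(vcpu->vc_parent->vm_map, gpa, VM_FAULT_WIRE,
	    PROT_READ | PROT_WRITE | PROT_EXEC);

	/*
	 * We may have been moved to another cpu, so vmclear the VMCS
	 * on the last cpu (via ipi if needed) and vmptrld it here
	 * before touching it again.
	 */
	mtx_enter(&vcpu->vc_vmx_mtx);
	if (vcpu_reload_vmcs_vmx(vcpu)) {
		ERROR("%s: failed to reload vmcs\n", __func__);
		return (EINVAL);
	}
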
> This diff is not ready for OK as it will need to be split into 2-3
> commits to keep things manageable, but for testing it really needs to
> be used in concert.
>
> THANKS!
>
> -dv
>
>
> diff refs/heads/master refs/heads/vmresume-vmx
> blob - cc189771bc76745cd391e9b2a58f46dd92aa32ce
> blob + d7c6c592b12b236d14ade863fd1e75d8effc179c
> --- sys/arch/amd64/amd64/cpu.c
> +++ sys/arch/amd64/amd64/cpu.c
> @@ -790,7 +790,7 @@ cpu_init_vmm(struct cpu_info *ci)
>                   &ci->ci_vmxon_region_pa))
>                       panic("Can't locate VMXON region in phys mem");
>               ci->ci_vmcs_pa = VMX_VMCS_PA_CLEAR;
> -             rw_init(&ci->ci_vmcs_lock, "vmcslock");
> +             mtx_init(&ci->ci_vmcs_mtx, IPL_MPFLOOR);
>       }
>  }
>  #endif /* NVMM > 0 */
> blob - 2535558cce5f07f4d6a150ce53b100524801755a
> blob + e335a1dc5e8a400b4bbf49cac2ec8853dffcdae3
> --- sys/arch/amd64/amd64/vmm.c
> +++ sys/arch/amd64/amd64/vmm.c
> @@ -29,6 +29,7 @@
>  #include <sys/pledge.h>
>  #include <sys/memrange.h>
>  #include <sys/tracepoint.h>
> +#include <sys/syslog.h>
>
>  #include <uvm/uvm_extern.h>
>
> @@ -47,14 +48,18 @@
>  void *l1tf_flush_region;
>
>  #ifdef VMM_DEBUG
> -#define DPRINTF(x...)        do { printf(x); } while(0)
> +#define DPRINTF(x...)        do { log(LOG_DEBUG, x); } while(0)
> +#define DEBUG_ADD(x...)      do { addlog(x); } while(0)
>  #else
>  #define DPRINTF(x...)
> +#define DEBUG_ADD(x...)
>  #endif /* VMM_DEBUG */
>
> +#define ERROR(x...)  do { log(LOG_ERR, x); } while(0)
> +
>  #define DEVNAME(s)  ((s)->sc_dev.dv_xname)
>
> -#define CTRL_DUMP(x,y,z) printf("     %s: Can set:%s Can clear:%s\n", #z , \
> +#define CTRL_DUMP(x,y,z) addlog("     %s: Can set:%s Can clear:%s\n", #z , \
>                               vcpu_vmx_check_cap(x, IA32_VMX_##y ##_CTLS, \
>                               IA32_VMX_##z, 1) ? "Yes" : "No", \
>                               vcpu_vmx_check_cap(x, IA32_VMX_##y ##_CTLS, \
> @@ -657,7 +662,7 @@ vm_resetcpu(struct vm_resetcpu_params *vrp)
>           vm->vm_id, vcpu->vc_id);
>
>       if (vcpu_reset_regs(vcpu, &vrp->vrp_init_state)) {
> -             printf("%s: failed\n", __func__);
> +             ERROR("%s: failed\n", __func__);
>  #ifdef VMM_DEBUG
>               dump_vcpu(vcpu);
>  #endif /* VMM_DEBUG */
> @@ -705,9 +710,7 @@ vm_intr_pending(struct vm_intr_params *vip)
>       if (vcpu == NULL)
>               return (ENOENT);
>
> -     rw_enter_write(&vcpu->vc_lock);
>       vcpu->vc_intr = vip->vip_intr;
> -     rw_exit_write(&vcpu->vc_lock);
>
>       return (0);
>  }
> @@ -810,11 +813,13 @@ vm_rwregs(struct vm_rwregs_params *vrwp, int dir)
>
>       rw_enter_write(&vcpu->vc_lock);
>       if (vmm_softc->mode == VMM_MODE_VMX ||
> -         vmm_softc->mode == VMM_MODE_EPT)
> +         vmm_softc->mode == VMM_MODE_EPT) {
> +             mtx_enter(&vcpu->vc_vmx_mtx);
>               ret = (dir == 0) ?
>                   vcpu_readregs_vmx(vcpu, vrwp->vrwp_mask, vrs) :
>                   vcpu_writeregs_vmx(vcpu, vrwp->vrwp_mask, 1, vrs);
> -     else if (vmm_softc->mode == VMM_MODE_SVM ||
> +             mtx_leave(&vcpu->vc_vmx_mtx);
> +     } else if (vmm_softc->mode == VMM_MODE_SVM ||
>           vmm_softc->mode == VMM_MODE_RVI)
>               ret = (dir == 0) ?
>                   vcpu_readregs_svm(vcpu, vrwp->vrwp_mask, vrs) :
> @@ -1000,7 +1005,7 @@ vmx_mprotect_ept(vm_map_t vm_map, paddr_t sgpa, paddr_
>                       ret = uvm_fault(vm_map, addr, VM_FAULT_WIRE,
>                           PROT_READ | PROT_WRITE | PROT_EXEC);
>                       if (ret)
> -                             printf("%s: uvm_fault returns %d, GPA=0x%llx\n",
> +                             ERROR("%s: uvm_fault returns %d, GPA=0x%llx\n",
>                                   __func__, ret, (uint64_t)addr);
>
>                       pte = vmx_pmap_find_pte_ept(pmap, addr);
> @@ -1181,7 +1186,7 @@ vmm_start(void)
>               for (i = 100000; (!(ci->ci_flags & CPUF_VMM)) && i>0;i--)
>                       delay(10);
>               if (!(ci->ci_flags & CPUF_VMM)) {
> -                     printf("%s: failed to enter VMM mode\n",
> +                     ERROR("%s: failed to enter VMM mode\n",
>                               ci->ci_dev->dv_xname);
>                       ret = EIO;
>               }
> @@ -1191,7 +1196,7 @@ vmm_start(void)
>       /* Start VMM on this CPU */
>       start_vmm_on_cpu(self);
>       if (!(self->ci_flags & CPUF_VMM)) {
> -             printf("%s: failed to enter VMM mode\n",
> +             ERROR("%s: failed to enter VMM mode\n",
>                       self->ci_dev->dv_xname);
>               ret = EIO;
>       }
> @@ -1229,7 +1234,7 @@ vmm_stop(void)
>               for (i = 100000; (ci->ci_flags & CPUF_VMM) && i>0 ;i--)
>                       delay(10);
>               if (ci->ci_flags & CPUF_VMM) {
> -                     printf("%s: failed to exit VMM mode\n",
> +                     ERROR("%s: failed to exit VMM mode\n",
>                               ci->ci_dev->dv_xname);
>                       ret = EIO;
>               }
> @@ -1239,7 +1244,7 @@ vmm_stop(void)
>       /* Stop VMM on this CPU */
>       stop_vmm_on_cpu(self);
>       if (self->ci_flags & CPUF_VMM) {
> -             printf("%s: failed to exit VMM mode\n",
> +             ERROR("%s: failed to exit VMM mode\n",
>                       self->ci_dev->dv_xname);
>               ret = EIO;
>       }
> @@ -1368,22 +1373,22 @@ vmclear_on_cpu(struct cpu_info *ci)
>  static int
>  vmx_remote_vmclear(struct cpu_info *ci, struct vcpu *vcpu)
>  {
> -     int ret = 0, nticks = 100000;
> +     int ret = 0, nticks = 200000000;
>
> -     rw_enter_write(&ci->ci_vmcs_lock);
> +     mtx_enter(&ci->ci_vmcs_mtx);
>       atomic_swap_ulong(&ci->ci_vmcs_pa, vcpu->vc_control_pa);
>       x86_send_ipi(ci, X86_IPI_VMCLEAR_VMM);
>
>       while (ci->ci_vmcs_pa != VMX_VMCS_PA_CLEAR) {
>               CPU_BUSY_CYCLE();
>               if (--nticks <= 0) {
> -                     printf("%s: spun out\n", __func__);
> +                     ERROR("%s: spun out\n", __func__);
>                       ret = 1;
>                       break;
>               }
>       }
>       atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED);
> -     rw_exit_write(&ci->ci_vmcs_lock);
> +     mtx_leave(&ci->ci_vmcs_mtx);
>
>       return (ret);
>  }
> @@ -1523,7 +1528,7 @@ vm_create(struct vm_create_params *vcp, struct proc *p
>       strncpy(vm->vm_name, vcp->vcp_name, VMM_MAX_NAME_LEN - 1);
>
>       if (vm_impl_init(vm, p)) {
> -             printf("failed to init arch-specific features for vm %p\n", vm);
> +             ERROR("failed to init arch-specific features for vm %p\n", vm);
>               vm_teardown(vm);
>               rw_exit_write(&vmm_softc->vm_lock);
>               return (ENOMEM);
> @@ -1541,7 +1546,7 @@ vm_create(struct vm_create_params *vcp, struct proc *p
>               vcpu = pool_get(&vcpu_pool, PR_WAITOK | PR_ZERO);
>               vcpu->vc_parent = vm;
>               if ((ret = vcpu_init(vcpu)) != 0) {
> -                     printf("failed to init vcpu %d for vm %p\n", i, vm);
> +                     ERROR("failed to init vcpu %d for vm %p\n", i, vm);
>                       vm_teardown(vm);
>                       vmm_softc->vm_idx--;
>                       rw_exit_write(&vmm_softc->vm_lock);
> @@ -1607,7 +1612,7 @@ vm_impl_init_vmx(struct vm *vm, struct proc *p)
>                   PROT_READ | PROT_WRITE | PROT_EXEC,
>                   &p->p_vmspace->vm_map, vmr->vmr_va, vmr->vmr_size);
>               if (ret) {
> -                     printf("%s: uvm_share failed (%d)\n", __func__, ret);
> +                     ERROR("%s: uvm_share failed (%d)\n", __func__, ret);
>                       /* uvmspace_free calls pmap_destroy for us */
>                       uvmspace_free(vm->vm_vmspace);
>                       vm->vm_vmspace = NULL;
> @@ -1617,7 +1622,7 @@ vm_impl_init_vmx(struct vm *vm, struct proc *p)
>
>       ret = pmap_convert(vm->vm_map->pmap, PMAP_TYPE_EPT);
>       if (ret) {
> -             printf("%s: pmap_convert failed\n", __func__);
> +             ERROR("%s: pmap_convert failed\n", __func__);
>               /* uvmspace_free calls pmap_destroy for us */
>               uvmspace_free(vm->vm_vmspace);
>               vm->vm_vmspace = NULL;
> @@ -1670,7 +1675,7 @@ vm_impl_init_svm(struct vm *vm, struct proc *p)
>                   PROT_READ | PROT_WRITE | PROT_EXEC,
>                   &p->p_vmspace->vm_map, vmr->vmr_va, vmr->vmr_size);
>               if (ret) {
> -                     printf("%s: uvm_share failed (%d)\n", __func__, ret);
> +                     ERROR("%s: uvm_share failed (%d)\n", __func__, ret);
>                       /* uvmspace_free calls pmap_destroy for us */
>                       uvmspace_free(vm->vm_vmspace);
>                       vm->vm_vmspace = NULL;
> @@ -1785,6 +1790,7 @@ vcpu_reload_vmcs_vmx(struct vcpu *vcpu)
>       struct cpu_info *ci, *last_ci;
>
>       rw_assert_wrlock(&vcpu->vc_lock);
> +     MUTEX_ASSERT_LOCKED(&vcpu->vc_vmx_mtx);
>
>       ci = curcpu();
>       last_ci = vcpu->vc_last_pcpu;
> @@ -1804,7 +1810,7 @@ vcpu_reload_vmcs_vmx(struct vcpu *vcpu)
>       }
>
>       if (vmptrld(&vcpu->vc_control_pa)) {
> -             printf("%s: vmptrld\n", __func__);
> +             ERROR("%s: vmptrld\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -1838,6 +1844,8 @@ vcpu_readregs_vmx(struct vcpu *vcpu, uint64_t regmask,
>       struct vcpu_segment_info *sregs = vrs->vrs_sregs;
>       struct vmx_msr_store *msr_store;
>
> +     MUTEX_ASSERT_LOCKED(&vcpu->vc_vmx_mtx);
> +
>  #ifdef VMM_DEBUG
>       /* VMCS should be loaded... */
>       paddr_t pa = 0ULL;
> @@ -2113,6 +2121,8 @@ vcpu_writeregs_vmx(struct vcpu *vcpu, uint64_t regmask
>       struct vcpu_segment_info *sregs = vrs->vrs_sregs;
>       struct vmx_msr_store *msr_store;
>
> +     MUTEX_ASSERT_LOCKED(&vcpu->vc_vmx_mtx);
> +
>       if (loadvmcs) {
>               if (vcpu_reload_vmcs_vmx(vcpu))
>                       return (EINVAL);
> @@ -2520,7 +2530,7 @@ svm_setmsrbr(struct vcpu *vcpu, uint32_t msr)
>               idx = SVM_MSRIDX(msr - 0xc0010000) + 0x1000;
>               msrs[idx] &= ~(SVM_MSRBIT_R(msr - 0xc0010000));
>       } else {
> -             printf("%s: invalid msr 0x%x\n", __func__, msr);
> +             ERROR("%s: invalid msr 0x%x\n", __func__, msr);
>               return;
>       }
>  }
> @@ -2561,7 +2571,7 @@ svm_setmsrbw(struct vcpu *vcpu, uint32_t msr)
>               idx = SVM_MSRIDX(msr - 0xc0010000) + 0x1000;
>               msrs[idx] &= ~(SVM_MSRBIT_W(msr - 0xc0010000));
>       } else {
> -             printf("%s: invalid msr 0x%x\n", __func__, msr);
> +             ERROR("%s: invalid msr 0x%x\n", __func__, msr);
>               return;
>       }
>  }
> @@ -2611,7 +2621,7 @@ vmx_setmsrbr(struct vcpu *vcpu, uint32_t msr)
>               idx = VMX_MSRIDX(msr - 0xc0000000) + 0x400;
>               msrs[idx] &= ~(VMX_MSRBIT(msr - 0xc0000000));
>       } else
> -             printf("%s: invalid msr 0x%x\n", __func__, msr);
> +             ERROR("%s: invalid msr 0x%x\n", __func__, msr);
>  }
>
>  /*
> @@ -2643,7 +2653,7 @@ vmx_setmsrbw(struct vcpu *vcpu, uint32_t msr)
>               idx = VMX_MSRIDX(msr - 0xc0000000) + 0xc00;
>               msrs[idx] &= ~(VMX_MSRBIT(msr - 0xc0000000));
>       } else
> -             printf("%s: invalid msr 0x%x\n", __func__, msr);
> +             ERROR("%s: invalid msr 0x%x\n", __func__, msr);
>  }
>
>  /*
> @@ -2744,6 +2754,7 @@ vcpu_reset_regs_vmx(struct vcpu *vcpu, struct vcpu_reg
>       struct vmx_msr_store *msr_store;
>
>       rw_assert_wrlock(&vcpu->vc_lock);
> +     mtx_enter(&vcpu->vc_vmx_mtx);
>
>       cr0 = vrs->vrs_crs[VCPU_REGS_CR0];
>
> @@ -3028,12 +3039,25 @@ vcpu_reset_regs_vmx(struct vcpu *vcpu, struct vcpu_reg
>           IA32_VMX_ACTIVATE_SECONDARY_CONTROLS, 1)) {
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_VPID, 1)) {
> +
> +                     /* We need to drop the mutex before acquiring a vpid */
> +                     vcpu->vc_last_pcpu = curcpu();
> +                     mtx_leave(&vcpu->vc_vmx_mtx);
> +
>                       if (vmm_alloc_vpid(&vpid)) {
>                               DPRINTF("%s: could not allocate VPID\n",
>                                   __func__);
>                               ret = EINVAL;
> +                             goto exit_no_mtx;
> +                     }
> +
> +                     mtx_enter(&vcpu->vc_vmx_mtx);
> +                     if (vcpu_reload_vmcs_vmx(vcpu)) {
> +                             ERROR("%s: failed to reload vmcs\n", __func__);
> +                             ret = EINVAL;
>                               goto exit;
>                       }
> +
>                       if (vmwrite(VMCS_GUEST_VPID, vpid)) {
>                               DPRINTF("%s: error setting guest VPID\n",
>                                   __func__);
> @@ -3297,6 +3321,8 @@ vcpu_reset_regs_vmx(struct vcpu *vcpu, struct vcpu_reg
>       atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED);
>
>  exit:
> +     mtx_leave(&vcpu->vc_vmx_mtx);
> +exit_no_mtx:
>       return (ret);
>  }
>
> @@ -3323,6 +3349,8 @@ vcpu_init_vmx(struct vcpu *vcpu)
>       uint32_t cr0, cr4;
>       int ret = 0;
>
> +     mtx_init(&vcpu->vc_vmx_mtx, IPL_MPFLOOR);
> +
>       /* Allocate VMCS VA */
>       vcpu->vc_control_va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_page, &kp_zero,
>           &kd_waitok);
> @@ -4459,7 +4487,7 @@ vmm_translate_gva(struct vcpu *vcpu, uint64_t va, uint
>               if (vcpu_readregs_svm(vcpu, VM_RWREGS_ALL, &vrs))
>                       return (EINVAL);
>       } else {
> -             printf("%s: unknown vmm mode", __func__);
> +             ERROR("%s: unknown vmm mode", __func__);
>               return (EINVAL);
>       }
>
> @@ -4598,10 +4626,13 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>       struct region_descriptor gdtr, idtr;
>
>       rw_assert_wrlock(&vcpu->vc_lock);
> +     MUTEX_ASSERT_UNLOCKED(&vcpu->vc_vmx_mtx);
> +     mtx_enter(&vcpu->vc_vmx_mtx);
>
>       if (vcpu_reload_vmcs_vmx(vcpu)) {
> -             printf("%s: failed (re)loading vmcs\n", __func__);
> -             return (EINVAL);
> +             log(LOG_ERR, "%s: failed (re)loading vmcs\n", __func__);
> +             ret = EINVAL;
> +             goto out;
>       }
>
>       /*
> @@ -4643,7 +4674,8 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>                           vcpu->vc_id);
>                       vmx_vcpu_dump_regs(vcpu);
>                       dump_vcpu(vcpu);
> -                     return (EINVAL);
> +                     ret = EINVAL;
> +                     goto out;
>               default:
>                       DPRINTF("%s: unimplemented exit type %d (%s)\n",
>                           __func__,
> @@ -4659,39 +4691,44 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>
>       setregion(&gdt, ci->ci_gdt, GDT_SIZE - 1);
>       if (gdt.rd_base == 0) {
> -             printf("%s: setregion\n", __func__);
> -             return (EINVAL);
> +             ERROR("%s: setregion\n", __func__);
> +             ret = EINVAL;
> +             goto out;
>       }
>
>       /* Host GDTR base */
>       if (vmwrite(VMCS_HOST_IA32_GDTR_BASE, gdt.rd_base)) {
> -             printf("%s: vmwrite(0x%04X, 0x%llx)\n", __func__,
> +             ERROR("%s: vmwrite(0x%04X, 0x%llx)\n", __func__,
>                   VMCS_HOST_IA32_GDTR_BASE, gdt.rd_base);
> -             return (EINVAL);
> +             ret = EINVAL;
> +             goto out;
>       }
>
>       /* Host TR base */
>       if (vmwrite(VMCS_HOST_IA32_TR_BASE, (uint64_t)ci->ci_tss)) {
> -             printf("%s: vmwrite(0x%04X, 0x%llx)\n", __func__,
> +             ERROR("%s: vmwrite(0x%04X, 0x%llx)\n", __func__,
>                   VMCS_HOST_IA32_TR_BASE, (uint64_t)ci->ci_tss);
> -             return (EINVAL);
> +             ret = EINVAL;
> +             goto out;
>       }
>
>       /* Host CR3 */
>       cr3 = rcr3();
>       if (vmwrite(VMCS_HOST_IA32_CR3, cr3)) {
> -             printf("%s: vmwrite(0x%04X, 0x%llx)\n", __func__,
> +             ERROR("%s: vmwrite(0x%04X, 0x%llx)\n", __func__,
>                   VMCS_HOST_IA32_CR3, cr3);
> -             return (EINVAL);
> +             ret = EINVAL;
> +             goto out;
>       }
>
>       /* Handle vmd(8) injected interrupts */
>       /* Is there an interrupt pending injection? */
>       if (irq != 0xFFFF) {
>               if (vmread(VMCS_GUEST_INTERRUPTIBILITY_ST, &int_st)) {
> -                     printf("%s: can't get interruptibility state\n",
> +                     ERROR("%s: can't get interruptibility state\n",
>                           __func__);
> -                     return (EINVAL);
> +                     ret = EINVAL;
> +                     goto out;
>               }
>
>               /* Interruptbility state 0x3 covers NMIs and STI */
> @@ -4700,9 +4737,10 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>                       eii |= (1ULL << 31);    /* Valid */
>                       eii |= (0ULL << 8);     /* Hardware Interrupt */
>                       if (vmwrite(VMCS_ENTRY_INTERRUPTION_INFO, eii)) {
> -                             printf("vcpu_run_vmx: can't vector "
> +                             ERROR("vcpu_run_vmx: can't vector "
>                                   "interrupt to guest\n");
> -                             return (EINVAL);
> +                             ret = EINVAL;
> +                             goto out;
>                       }
>
>                       irq = 0xFFFF;
> @@ -4712,15 +4750,17 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>                * Disable window exiting
>                */
>               if (vmread(VMCS_PROCBASED_CTLS, &procbased)) {
> -                     printf("%s: can't read procbased ctls on exit\n",
> +                     ERROR("%s: can't read procbased ctls on exit\n",
>                           __func__);
> -                     return (EINVAL);
> +                     ret = EINVAL;
> +                     goto out;
>               } else {
>                       procbased &= ~IA32_VMX_INTERRUPT_WINDOW_EXITING;
>                       if (vmwrite(VMCS_PROCBASED_CTLS, procbased)) {
> -                             printf("%s: can't write procbased ctls "
> +                             ERROR("%s: can't write procbased ctls "
>                                   "on exit\n", __func__);
> -                             return (EINVAL);
> +                             ret = EINVAL;
> +                             goto out;
>                       }
>               }
>       }
> @@ -4753,14 +4793,14 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>
>                       eii |= (3ULL << 8);     /* Hardware Exception */
>                       if (vmwrite(VMCS_ENTRY_INTERRUPTION_INFO, eii)) {
> -                             printf("%s: can't vector event to guest\n",
> +                             ERROR("%s: can't vector event to guest\n",
>                                   __func__);
>                               ret = EINVAL;
>                               break;
>                       }
>
>                       if (vmwrite(VMCS_ENTRY_EXCEPTION_ERROR_CODE, 0)) {
> -                             printf("%s: can't write error code to guest\n",
> +                             ERROR("%s: can't write error code to guest\n",
>                                   __func__);
>                               ret = EINVAL;
>                               break;
> @@ -4823,7 +4863,7 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>                           &vcpu->vc_gueststate.vg_rip, &exit_reason);
>                       if (vmread(VMCS_GUEST_IA32_RFLAGS,
>                           &vcpu->vc_gueststate.vg_rflags)) {
> -                             printf("%s: can't read guest rflags during "
> +                             ERROR("%s: can't read guest rflags during "
>                                   "exit\n", __func__);
>                               ret = EINVAL;
>                               break;
> @@ -4831,13 +4871,13 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>
>                       /* Update our state */
>                       if (!(exitinfo & VMX_EXIT_INFO_HAVE_RIP)) {
> -                             printf("%s: cannot read guest rip\n", __func__);
> +                             ERROR("%s: cannot read guest rip\n", __func__);
>                               ret = EINVAL;
>                               break;
>                       }
>
>                       if (!(exitinfo & VMX_EXIT_INFO_HAVE_REASON)) {
> -                             printf("%s: cant read exit reason\n", __func__);
> +                             ERROR("%s: cant read exit reason\n", __func__);
>                               ret = EINVAL;
>                               break;
>                       }
> @@ -4860,7 +4900,7 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>                        */
>                       if (vcpu->vc_irqready == 0 && vcpu->vc_intr) {
>                               if (vmread(VMCS_PROCBASED_CTLS, &procbased)) {
> -                                     printf("%s: can't read procbased ctls "
> +                                     ERROR("%s: can't read procbased ctls "
>                                           "on intwin exit\n", __func__);
>                                       ret = EINVAL;
>                                       break;
> @@ -4868,7 +4908,7 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>
>                               procbased |= IA32_VMX_INTERRUPT_WINDOW_EXITING;
>                               if (vmwrite(VMCS_PROCBASED_CTLS, procbased)) {
> -                                     printf("%s: can't write procbased ctls "
> +                                     ERROR("%s: can't write procbased ctls "
>                                           "on intwin exit\n", __func__);
>                                       ret = EINVAL;
>                                       break;
> @@ -4900,19 +4940,19 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>                        */
>                       switch (ret) {
>                       case VMX_FAIL_LAUNCH_INVALID_VMCS:
> -                             printf("%s: failed %s with invalid vmcs\n",
> +                             ERROR("%s: failed %s with invalid vmcs\n",
>                                   __func__,
>                                   (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED
>                                       ? "vmresume" : "vmlaunch"));
>                               break;
>                       case VMX_FAIL_LAUNCH_VALID_VMCS:
> -                             printf("%s: failed %s with valid vmcs\n",
> +                             ERROR("%s: failed %s with valid vmcs\n",
>                                   __func__,
>                                   (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED
>                                       ? "vmresume" : "vmlaunch"));
>                               break;
>                       default:
> -                             printf("%s: failed %s for unknown reason\n",
> +                             ERROR("%s: failed %s for unknown reason\n",
>                                   __func__,
>                                   (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED
>                                       ? "vmresume" : "vmlaunch"));
> @@ -4922,10 +4962,10 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>
>                       /* Try to translate a vmfail error code, if possible. */
>                       if (vmread(VMCS_INSTRUCTION_ERROR, &insn_error)) {
> -                             printf("%s: can't read insn error field\n",
> +                             ERROR("%s: can't read insn error field\n",
>                                   __func__);
>                       } else
> -                             printf("%s: error code = %lld, %s\n", __func__,
> +                             ERROR("%s: error code = %lld, %s\n", __func__,
>                                   insn_error,
>                                   vmx_instruction_error_decode(insn_error));
>  #ifdef VMM_DEBUG
> @@ -4935,13 +4975,14 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *
>               }
>       }
>
> -     vcpu->vc_last_pcpu = curcpu();
> -
>       /* Copy the VCPU register state to the exit structure */
>       if (vcpu_readregs_vmx(vcpu, VM_RWREGS_ALL, &vcpu->vc_exit.vrs))
>               ret = EINVAL;
>       vcpu->vc_exit.cpl = vmm_get_guest_cpu_cpl(vcpu);
>
> +out:
> +     vcpu->vc_last_pcpu = curcpu();
> +     mtx_leave(&vcpu->vc_vmx_mtx);
>       return (ret);
>  }
>
> @@ -4961,7 +5002,7 @@ vmx_handle_intr(struct vcpu *vcpu)
>       vaddr_t handler;
>
>       if (vmread(VMCS_EXIT_INTERRUPTION_INFO, &eii)) {
> -             printf("%s: can't obtain intr info\n", __func__);
> +             ERROR("%s: can't obtain intr info\n", __func__);
>               return;
>       }
>
> @@ -5027,12 +5068,12 @@ vmx_handle_hlt(struct vcpu *vcpu)
>       uint64_t insn_length, rflags;
>
>       if (vmread(VMCS_INSTRUCTION_LENGTH, &insn_length)) {
> -             printf("%s: can't obtain instruction length\n", __func__);
> +             ERROR("%s: can't obtain instruction length\n", __func__);
>               return (EINVAL);
>       }
>
>       if (vmread(VMCS_GUEST_IA32_RFLAGS, &rflags)) {
> -             printf("%s: can't obtain guest rflags\n", __func__);
> +             ERROR("%s: can't obtain guest rflags\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -5166,7 +5207,7 @@ svm_handle_exit(struct vcpu *vcpu)
>
>               if (rflags & PSL_T) {
>                       if (vmm_inject_db(vcpu)) {
> -                             printf("%s: can't inject #DB exception to "
> +                             ERROR("%s: can't inject #DB exception to "
>                                   "guest", __func__);
>                               return (EINVAL);
>                       }
> @@ -5283,13 +5324,13 @@ vmx_handle_exit(struct vcpu *vcpu)
>       if (update_rip) {
>               if (vmwrite(VMCS_GUEST_IA32_RIP,
>                   vcpu->vc_gueststate.vg_rip)) {
> -                     printf("%s: can't advance rip\n", __func__);
> +                     ERROR("%s: can't advance rip\n", __func__);
>                       return (EINVAL);
>               }
>
>               if (vmread(VMCS_GUEST_INTERRUPTIBILITY_ST,
>                   &istate)) {
> -                     printf("%s: can't read interruptibility state\n",
> +                     ERROR("%s: can't read interruptibility state\n",
>                           __func__);
>                       return (EINVAL);
>               }
> @@ -5299,14 +5340,14 @@ vmx_handle_exit(struct vcpu *vcpu)
>
>               if (vmwrite(VMCS_GUEST_INTERRUPTIBILITY_ST,
>                   istate)) {
> -                     printf("%s: can't write interruptibility state\n",
> +                     ERROR("%s: can't write interruptibility state\n",
>                           __func__);
>                       return (EINVAL);
>               }
>
>               if (rflags & PSL_T) {
>                       if (vmm_inject_db(vcpu)) {
> -                             printf("%s: can't inject #DB exception to "
> +                             ERROR("%s: can't inject #DB exception to "
>                                   "guest", __func__);
>                               return (EINVAL);
>                       }
> @@ -5423,7 +5464,7 @@ int
>  vmx_get_exit_qualification(uint64_t *exit_qualification)
>  {
>       if (vmread(VMCS_GUEST_EXIT_QUALIFICATION, exit_qualification)) {
> -             printf("%s: can't extract exit qual\n", __func__);
> +             ERROR("%s: can't extract exit qual\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -5500,7 +5541,7 @@ svm_fault_page(struct vcpu *vcpu, paddr_t gpa)
>       ret = uvm_fault(vcpu->vc_parent->vm_map, gpa, VM_FAULT_WIRE,
>           PROT_READ | PROT_WRITE | PROT_EXEC);
>       if (ret)
> -             printf("%s: uvm_fault returns %d, GPA=0x%llx, rip=0x%llx\n",
> +             ERROR("%s: uvm_fault returns %d, GPA=0x%llx, rip=0x%llx\n",
>                   __func__, ret, (uint64_t)gpa, vcpu->vc_gueststate.vg_rip);
>
>       return (ret);
> @@ -5529,7 +5570,7 @@ svm_handle_np_fault(struct vcpu *vcpu)
>               ret = svm_fault_page(vcpu, gpa);
>               break;
>       default:
> -             printf("unknown memory type %d for GPA 0x%llx\n",
> +             ERROR("unknown memory type %d for GPA 0x%llx\n",
>                   gpa_memtype, gpa);
>               return (EINVAL);
>       }
> @@ -5560,7 +5601,7 @@ vmx_fault_page(struct vcpu *vcpu, paddr_t gpa)
>
>       fault_type = vmx_get_guest_faulttype();
>       if (fault_type == -1) {
> -             printf("%s: invalid fault type\n", __func__);
> +             ERROR("%s: invalid fault type\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -5569,13 +5610,25 @@ vmx_fault_page(struct vcpu *vcpu, paddr_t gpa)
>               return (EAGAIN);
>       }
>
> -     KERNEL_LOCK();
> +     /*
> +      * We're holding a VMX specific mutex and must release it before
> +      * attempting to fault a page. Since we may sleep, we can't assume
> +      * we're on the same CPU as before.
> +      */
> +     vcpu->vc_last_pcpu = curcpu();
> +     mtx_leave(&vcpu->vc_vmx_mtx);
> +
>       ret = uvm_fault(vcpu->vc_parent->vm_map, gpa, VM_FAULT_WIRE,
>           PROT_READ | PROT_WRITE | PROT_EXEC);
> -     KERNEL_UNLOCK();
>
> +     mtx_enter(&vcpu->vc_vmx_mtx);
> +     if (vcpu_reload_vmcs_vmx(vcpu)) {
> +             ERROR("%s: failed to reload vmcs\n", __func__);
> +             return (EINVAL);
> +     }
> +
>       if (ret)
> -             printf("%s: uvm_fault returns %d, GPA=0x%llx, rip=0x%llx\n",
> +             ERROR("%s: uvm_fault returns %d, GPA=0x%llx, rip=0x%llx\n",
>                   __func__, ret, (uint64_t)gpa, vcpu->vc_gueststate.vg_rip);
>
>       return (ret);
> @@ -5595,7 +5648,7 @@ vmx_handle_np_fault(struct vcpu *vcpu)
>
>       ret = 0;
>       if (vmread(VMCS_GUEST_PHYSICAL_ADDRESS, &gpa)) {
> -             printf("%s: cannot extract faulting pa\n", __func__);
> +             ERROR("%s: cannot extract faulting pa\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -5605,7 +5658,7 @@ vmx_handle_np_fault(struct vcpu *vcpu)
>               ret = vmx_fault_page(vcpu, gpa);
>               break;
>       default:
> -             printf("unknown memory type %d for GPA 0x%llx\n",
> +             ERROR("unknown memory type %d for GPA 0x%llx\n",
>                   gpa_memtype, gpa);
>               return (EINVAL);
>       }
> @@ -5835,7 +5888,7 @@ vmx_handle_inout(struct vcpu *vcpu)
>       int ret;
>
>       if (vmread(VMCS_INSTRUCTION_LENGTH, &insn_length)) {
> -             printf("%s: can't obtain instruction length\n", __func__);
> +             ERROR("%s: can't obtain instruction length\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -5846,7 +5899,7 @@ vmx_handle_inout(struct vcpu *vcpu)
>       }
>
>       if (vmx_get_exit_qualification(&exit_qual)) {
> -             printf("%s: can't get exit qual\n", __func__);
> +             ERROR("%s: can't get exit qual\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -5930,7 +5983,7 @@ vmx_load_pdptes(struct vcpu *vcpu)
>       int ret;
>
>       if (vmread(VMCS_GUEST_IA32_CR3, &cr3)) {
> -             printf("%s: can't read guest cr3\n", __func__);
> +             ERROR("%s: can't read guest cr3\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -5939,22 +5992,22 @@ vmx_load_pdptes(struct vcpu *vcpu)
>               DPRINTF("%s: nonmapped guest CR3, setting PDPTEs to 0\n",
>                   __func__);
>               if (vmwrite(VMCS_GUEST_PDPTE0, 0)) {
> -                     printf("%s: can't write guest PDPTE0\n", __func__);
> +                     ERROR("%s: can't write guest PDPTE0\n", __func__);
>                       return (EINVAL);
>               }
>
>               if (vmwrite(VMCS_GUEST_PDPTE1, 0)) {
> -                     printf("%s: can't write guest PDPTE1\n", __func__);
> +                     ERROR("%s: can't write guest PDPTE1\n", __func__);
>                       return (EINVAL);
>               }
>
>               if (vmwrite(VMCS_GUEST_PDPTE2, 0)) {
> -                     printf("%s: can't write guest PDPTE2\n", __func__);
> +                     ERROR("%s: can't write guest PDPTE2\n", __func__);
>                       return (EINVAL);
>               }
>
>               if (vmwrite(VMCS_GUEST_PDPTE3, 0)) {
> -                     printf("%s: can't write guest PDPTE3\n", __func__);
> +                     ERROR("%s: can't write guest PDPTE3\n", __func__);
>                       return (EINVAL);
>               }
>               return (0);
> @@ -5962,43 +6015,68 @@ vmx_load_pdptes(struct vcpu *vcpu)
>
>       ret = 0;
>
> -     cr3_host_virt = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
> +     /* We need to drop the mutex as km_alloc may sleep. */
> +     vcpu->vc_last_pcpu = curcpu();
> +     mtx_leave(&vcpu->vc_vmx_mtx);
> +
> +     cr3_host_virt = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
> +         &kd_waitok);
>       if (!cr3_host_virt) {
> -             printf("%s: can't allocate address for guest CR3 mapping\n",
> +             ERROR("%s: can't allocate address for guest CR3 mapping\n",
>                   __func__);
> +             mtx_enter(&vcpu->vc_vmx_mtx);
> +             /* XXX VMCS may be bogus */
>               return (ENOMEM);
>       }
> -
>       pmap_kenter_pa(cr3_host_virt, cr3_host_phys, PROT_READ);
>
> +     mtx_enter(&vcpu->vc_vmx_mtx);
> +     if (vcpu_reload_vmcs_vmx(vcpu)) {
> +             ERROR("%s: failed to reload vmcs\n", __func__);
> +             mtx_leave(&vcpu->vc_vmx_mtx);
> +             ret = EINVAL;
> +             goto exit;
> +     }
> +
>       pdptes = (pd_entry_t *)cr3_host_virt;
>       if (vmwrite(VMCS_GUEST_PDPTE0, pdptes[0])) {
> -             printf("%s: can't write guest PDPTE0\n", __func__);
> +             ERROR("%s: can't write guest PDPTE0\n", __func__);
>               ret = EINVAL;
>               goto exit;
>       }
>
>       if (vmwrite(VMCS_GUEST_PDPTE1, pdptes[1])) {
> -             printf("%s: can't write guest PDPTE1\n", __func__);
> +             ERROR("%s: can't write guest PDPTE1\n", __func__);
>               ret = EINVAL;
>               goto exit;
>       }
>
>       if (vmwrite(VMCS_GUEST_PDPTE2, pdptes[2])) {
> -             printf("%s: can't write guest PDPTE2\n", __func__);
> +             ERROR("%s: can't write guest PDPTE2\n", __func__);
>               ret = EINVAL;
>               goto exit;
>       }
>
>       if (vmwrite(VMCS_GUEST_PDPTE3, pdptes[3])) {
> -             printf("%s: can't write guest PDPTE3\n", __func__);
> +             ERROR("%s: can't write guest PDPTE3\n", __func__);
>               ret = EINVAL;
>               goto exit;
>       }
>
>  exit:
>       pmap_kremove(cr3_host_virt, PAGE_SIZE);
> +
> +     /* km_free can sleep, so we need to release our mutex */
> +     vcpu->vc_last_pcpu = curcpu();
> +     mtx_leave(&vcpu->vc_vmx_mtx);
> +
>       km_free((void *)cr3_host_virt, PAGE_SIZE, &kv_any, &kp_none);
> +
> +     mtx_enter(&vcpu->vc_vmx_mtx);
> +     if (vcpu_reload_vmcs_vmx(vcpu)) {
> +             ERROR("%s: failed to reload vmcs after km_free\n", __func__);
> +             ret = EINVAL;
> +     }
>       return (ret);
>  }
>
> @@ -6068,7 +6146,7 @@ vmx_handle_cr0_write(struct vcpu *vcpu, uint64_t r)
>       }
>
>       if (vmread(VMCS_GUEST_IA32_CR0, &oldcr0)) {
> -             printf("%s: can't read guest cr0\n", __func__);
> +             ERROR("%s: can't read guest cr0\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -6076,7 +6154,7 @@ vmx_handle_cr0_write(struct vcpu *vcpu, uint64_t r)
>       r |= CR0_NE;
>
>       if (vmwrite(VMCS_GUEST_IA32_CR0, r)) {
> -             printf("%s: can't write guest cr0\n", __func__);
> +             ERROR("%s: can't write guest cr0\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -6098,7 +6176,7 @@ vmx_handle_cr0_write(struct vcpu *vcpu, uint64_t r)
>               msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va;
>
>               if (vmread(VMCS_ENTRY_CTLS, &ectls)) {
> -                     printf("%s: can't read entry controls", __func__);
> +                     ERROR("%s: can't read entry controls", __func__);
>                       return (EINVAL);
>               }
>
> @@ -6108,12 +6186,12 @@ vmx_handle_cr0_write(struct vcpu *vcpu, uint64_t r)
>                       ectls &= ~IA32_VMX_IA32E_MODE_GUEST;
>
>               if (vmwrite(VMCS_ENTRY_CTLS, ectls)) {
> -                     printf("%s: can't write entry controls", __func__);
> +                     ERROR("%s: can't write entry controls", __func__);
>                       return (EINVAL);
>               }
>
>               if (vmread(VMCS_GUEST_IA32_CR4, &cr4)) {
> -                     printf("%s: can't read guest cr4\n", __func__);
> +                     ERROR("%s: can't read guest cr4\n", __func__);
>                       return (EINVAL);
>               }
>
> @@ -6122,7 +6200,7 @@ vmx_handle_cr0_write(struct vcpu *vcpu, uint64_t r)
>                       ret = vmx_load_pdptes(vcpu);
>
>                       if (ret) {
> -                             printf("%s: updating PDPTEs failed\n", __func__);
> +                             ERROR("%s: updating PDPTEs failed\n", __func__);
>                               return (ret);
>                       }
>               }
> @@ -6178,7 +6256,7 @@ vmx_handle_cr4_write(struct vcpu *vcpu, uint64_t r)
>       r |= CR4_VMXE;
>
>       if (vmwrite(VMCS_GUEST_IA32_CR4, r)) {
> -             printf("%s: can't write guest cr4\n", __func__);
> +             ERROR("%s: can't write guest cr4\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -6197,12 +6275,12 @@ vmx_handle_cr(struct vcpu *vcpu)
>       uint8_t crnum, dir, reg;
>
>       if (vmread(VMCS_INSTRUCTION_LENGTH, &insn_length)) {
> -             printf("%s: can't obtain instruction length\n", __func__);
> +             ERROR("%s: can't obtain instruction length\n", __func__);
>               return (EINVAL);
>       }
>
>       if (vmx_get_exit_qualification(&exit_qual)) {
> -             printf("%s: can't get exit qual\n", __func__);
> +             ERROR("%s: can't get exit qual\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -6227,7 +6305,7 @@ vmx_handle_cr(struct vcpu *vcpu)
>                       case 2: r = vcpu->vc_gueststate.vg_rdx; break;
>                       case 3: r = vcpu->vc_gueststate.vg_rbx; break;
>                       case 4: if (vmread(VMCS_GUEST_IA32_RSP, &r)) {
> -                                     printf("%s: unable to read guest "
> +                                     ERROR("%s: unable to read guest "
>                                           "RSP\n", __func__);
>                                       return (EINVAL);
>                               }
> @@ -6302,7 +6380,7 @@ vmx_handle_rdmsr(struct vcpu *vcpu)
>       int ret;
>
>       if (vmread(VMCS_INSTRUCTION_LENGTH, &insn_length)) {
> -             printf("%s: can't obtain instruction length\n", __func__);
> +             ERROR("%s: can't obtain instruction length\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -6359,7 +6437,7 @@ vmx_handle_xsetbv(struct vcpu *vcpu)
>       int ret;
>
>       if (vmread(VMCS_INSTRUCTION_LENGTH, &insn_length)) {
> -             printf("%s: can't obtain instruction length\n", __func__);
> +             ERROR("%s: can't obtain instruction length\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -6505,7 +6583,7 @@ vmx_handle_wrmsr(struct vcpu *vcpu)
>       int ret;
>
>       if (vmread(VMCS_INSTRUCTION_LENGTH, &insn_length)) {
> -             printf("%s: can't obtain instruction length\n", __func__);
> +             ERROR("%s: can't obtain instruction length\n", __func__);
>               return (EINVAL);
>       }
>
> @@ -7228,7 +7306,7 @@ vmm_alloc_vpid(uint16_t *vpid)
>               }
>       }
>
> -     printf("%s: no available %ss\n", __func__,
> +     ERROR("%s: no available %ss\n", __func__,
>           (sc->mode == VMM_MODE_EPT || sc->mode == VMM_MODE_VMX) ? "VPID" :
>           "ASID");
>
> @@ -7657,24 +7735,24 @@ vcpu_state_decode(u_int state)
>  void
>  dump_vcpu(struct vcpu *vcpu)
>  {
> -     printf("vcpu @ %p\n", vcpu);
> -     printf("    parent vm @ %p\n", vcpu->vc_parent);
> -     printf("    mode: ");
> +     DPRINTF("vcpu @ %p\n", vcpu);
> +     DEBUG_ADD("    parent vm @ %p\n", vcpu->vc_parent);
> +     DEBUG_ADD("    mode: ");
>       if (vcpu->vc_virt_mode == VMM_MODE_VMX ||
>           vcpu->vc_virt_mode == VMM_MODE_EPT) {
> -             printf("VMX\n");
> -             printf("    pinbased ctls: 0x%llx\n",
> +             DEBUG_ADD("VMX\n");
> +             DEBUG_ADD("    pinbased ctls: 0x%llx\n",
>                   vcpu->vc_vmx_pinbased_ctls);
> -             printf("    true pinbased ctls: 0x%llx\n",
> +             DEBUG_ADD("    true pinbased ctls: 0x%llx\n",
>                   vcpu->vc_vmx_true_pinbased_ctls);
>               CTRL_DUMP(vcpu, PINBASED, EXTERNAL_INT_EXITING);
>               CTRL_DUMP(vcpu, PINBASED, NMI_EXITING);
>               CTRL_DUMP(vcpu, PINBASED, VIRTUAL_NMIS);
>               CTRL_DUMP(vcpu, PINBASED, ACTIVATE_VMX_PREEMPTION_TIMER);
>               CTRL_DUMP(vcpu, PINBASED, PROCESS_POSTED_INTERRUPTS);
> -             printf("    procbased ctls: 0x%llx\n",
> +             DEBUG_ADD("    procbased ctls: 0x%llx\n",
>                   vcpu->vc_vmx_procbased_ctls);
> -             printf("    true procbased ctls: 0x%llx\n",
> +             DEBUG_ADD("    true procbased ctls: 0x%llx\n",
>                   vcpu->vc_vmx_true_procbased_ctls);
>               CTRL_DUMP(vcpu, PROCBASED, INTERRUPT_WINDOW_EXITING);
>               CTRL_DUMP(vcpu, PROCBASED, USE_TSC_OFFSETTING);
> @@ -7698,7 +7776,7 @@ dump_vcpu(struct vcpu *vcpu)
>               CTRL_DUMP(vcpu, PROCBASED, PAUSE_EXITING);
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS,
>                   IA32_VMX_ACTIVATE_SECONDARY_CONTROLS, 1)) {
> -                     printf("    procbased2 ctls: 0x%llx\n",
> +                     DEBUG_ADD("    procbased2 ctls: 0x%llx\n",
>                           vcpu->vc_vmx_procbased2_ctls);
>                       CTRL_DUMP(vcpu, PROCBASED2, VIRTUALIZE_APIC);
>                       CTRL_DUMP(vcpu, PROCBASED2, ENABLE_EPT);
> @@ -7725,9 +7803,9 @@ dump_vcpu(struct vcpu *vcpu)
>                       CTRL_DUMP(vcpu, PROCBASED2, ENABLE_XSAVES_XRSTORS);
>                       CTRL_DUMP(vcpu, PROCBASED2, ENABLE_TSC_SCALING);
>               }
> -             printf("    entry ctls: 0x%llx\n",
> +             DEBUG_ADD("    entry ctls: 0x%llx\n",
>                   vcpu->vc_vmx_entry_ctls);
> -             printf("    true entry ctls: 0x%llx\n",
> +             DEBUG_ADD("    true entry ctls: 0x%llx\n",
>                   vcpu->vc_vmx_true_entry_ctls);
>               CTRL_DUMP(vcpu, ENTRY, LOAD_DEBUG_CONTROLS);
>               CTRL_DUMP(vcpu, ENTRY, IA32E_MODE_GUEST);
> @@ -7738,9 +7816,9 @@ dump_vcpu(struct vcpu *vcpu)
>               CTRL_DUMP(vcpu, ENTRY, LOAD_IA32_EFER_ON_ENTRY);
>               CTRL_DUMP(vcpu, ENTRY, LOAD_IA32_BNDCFGS_ON_ENTRY);
>               CTRL_DUMP(vcpu, ENTRY, CONCEAL_VM_ENTRIES_FROM_PT);
> -             printf("    exit ctls: 0x%llx\n",
> +             DEBUG_ADD("    exit ctls: 0x%llx\n",
>                   vcpu->vc_vmx_exit_ctls);
> -             printf("    true exit ctls: 0x%llx\n",
> +             DEBUG_ADD("    true exit ctls: 0x%llx\n",
>                   vcpu->vc_vmx_true_exit_ctls);
>               CTRL_DUMP(vcpu, EXIT, SAVE_DEBUG_CONTROLS);
>               CTRL_DUMP(vcpu, EXIT, HOST_SPACE_ADDRESS_SIZE);
> @@ -7772,9 +7850,9 @@ vmx_dump_vmcs_field(uint16_t fieldid, const char *msg)
>       uint64_t val;
>
>
> -     DPRINTF("%s (0x%04x): ", msg, fieldid);
> +     DEBUG_ADD("%s (0x%04x): ", msg, fieldid);
>       if (vmread(fieldid, &val))
> -             DPRINTF("???? ");
> +             DEBUG_ADD("???? ");
>       else {
>               /*
>                * Field width encoding : bits 13:14
> @@ -7786,10 +7864,10 @@ vmx_dump_vmcs_field(uint16_t fieldid, const char *msg)
>                */
>               width = (fieldid >> 13) & 0x3;
>               switch (width) {
> -                     case 0: DPRINTF("0x%04llx ", val); break;
> +                     case 0: DEBUG_ADD("0x%04llx ", val); break;
>                       case 1:
> -                     case 3: DPRINTF("0x%016llx ", val); break;
> -                     case 2: DPRINTF("0x%08llx ", val);
> +                     case 3: DEBUG_ADD("0x%016llx ", val); break;
> +                     case 2: DEBUG_ADD("0x%08llx ", val);
>               }
>       }
>  }
> @@ -7808,19 +7886,19 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>       /* XXX save and load new vmcs, restore at end */
>
>       DPRINTF("--CURRENT VMCS STATE--\n");
> -     printf("VMCS launched: %s\n",
> +     DEBUG_ADD("VMCS launched: %s\n",
>           (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED) ? "Yes" : "No");
> -     DPRINTF("VMXON revision : 0x%x\n",
> +     DEBUG_ADD("VMXON revision : 0x%x\n",
>           curcpu()->ci_vmm_cap.vcc_vmx.vmx_vmxon_revision);
> -     DPRINTF("CR0 fixed0: 0x%llx\n",
> +     DEBUG_ADD("CR0 fixed0: 0x%llx\n",
>           curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed0);
> -     DPRINTF("CR0 fixed1: 0x%llx\n",
> +     DEBUG_ADD("CR0 fixed1: 0x%llx\n",
>           curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed1);
> -     DPRINTF("CR4 fixed0: 0x%llx\n",
> +     DEBUG_ADD("CR4 fixed0: 0x%llx\n",
>           curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed0);
> -     DPRINTF("CR4 fixed1: 0x%llx\n",
> +     DEBUG_ADD("CR4 fixed1: 0x%llx\n",
>           curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed1);
> -     DPRINTF("MSR table size: 0x%x\n",
> +     DEBUG_ADD("MSR table size: 0x%x\n",
>           512 * (curcpu()->ci_vmm_cap.vcc_vmx.vmx_msr_table_size + 1));
>
>       has_sec = vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS,
> @@ -7846,15 +7924,15 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>               }
>       }
>
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_ES_SEL, "G.ES");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_CS_SEL, "G.CS");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_SS_SEL, "G.SS");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_DS_SEL, "G.DS");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_FS_SEL, "G.FS");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_GS_SEL, "G.GS");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_LDTR_SEL, "LDTR");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_TR_SEL, "G.TR");
>
> @@ -7871,52 +7949,52 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>               }
>       }
>
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_ES_SEL, "H.ES");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_CS_SEL, "H.CS");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_SS_SEL, "H.SS");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_DS_SEL, "H.DS");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_FS_SEL, "H.FS");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_GS_SEL, "H.GS");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_IO_BITMAP_A, "I/O Bitmap A");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_IO_BITMAP_B, "I/O Bitmap B");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS,
>           IA32_VMX_USE_MSR_BITMAPS, 1)) {
>               vmx_dump_vmcs_field(VMCS_MSR_BITMAP_ADDRESS, "MSR Bitmap");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       vmx_dump_vmcs_field(VMCS_EXIT_STORE_MSR_ADDRESS, "Exit Store MSRs");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_EXIT_LOAD_MSR_ADDRESS, "Exit Load MSRs");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_ENTRY_LOAD_MSR_ADDRESS, "Entry Load MSRs");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_EXECUTIVE_VMCS_POINTER, "Exec VMCS Ptr");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       if (has_sec) {
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_PML, 1)) {
>                       vmx_dump_vmcs_field(VMCS_PML_ADDRESS, "PML Addr");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>       }
>
>       vmx_dump_vmcs_field(VMCS_TSC_OFFSET, "TSC Offset");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS,
>           IA32_VMX_USE_TPR_SHADOW, 1)) {
>               vmx_dump_vmcs_field(VMCS_VIRTUAL_APIC_ADDRESS,
>                   "Virtual APIC Addr");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (has_sec) {
> @@ -7924,7 +8002,7 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>                   IA32_VMX_VIRTUALIZE_APIC, 1)) {
>                       vmx_dump_vmcs_field(VMCS_APIC_ACCESS_ADDRESS,
>                           "APIC Access Addr");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>       }
>
> @@ -7932,7 +8010,7 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>           IA32_VMX_PROCESS_POSTED_INTERRUPTS, 1)) {
>               vmx_dump_vmcs_field(VMCS_POSTED_INTERRUPT_DESC,
>                   "Posted Int Desc Addr");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (has_sec) {
> @@ -7940,30 +8018,30 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>                   IA32_VMX_ENABLE_VM_FUNCTIONS, 1)) {
>                       vmx_dump_vmcs_field(VMCS_VM_FUNCTION_CONTROLS,
>                           "VM Function Controls");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_EPT, 1)) {
>                       vmx_dump_vmcs_field(VMCS_GUEST_IA32_EPTP,
>                           "EPT Pointer");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_VIRTUAL_INTERRUPT_DELIVERY, 1)) {
>                       vmx_dump_vmcs_field(VMCS_EOI_EXIT_BITMAP_0,
>                           "EOI Exit Bitmap 0");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>                       vmx_dump_vmcs_field(VMCS_EOI_EXIT_BITMAP_1,
>                           "EOI Exit Bitmap 1");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>                       vmx_dump_vmcs_field(VMCS_EOI_EXIT_BITMAP_2,
>                           "EOI Exit Bitmap 2");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>                       vmx_dump_vmcs_field(VMCS_EOI_EXIT_BITMAP_3,
>                           "EOI Exit Bitmap 3");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
> @@ -7972,7 +8050,7 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>                       if (curcpu()->ci_vmm_cap.vcc_vmx.vmx_vm_func & 0x1) {
>                               vmx_dump_vmcs_field(VMCS_EPTP_LIST_ADDRESS,
>                                   "EPTP List Addr");
> -                             DPRINTF("\n");
> +                             DEBUG_ADD("\n");
>                       }
>               }
>
> @@ -7980,52 +8058,52 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>                   IA32_VMX_VMCS_SHADOWING, 1)) {
>                       vmx_dump_vmcs_field(VMCS_VMREAD_BITMAP_ADDRESS,
>                           "VMREAD Bitmap Addr");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>                       vmx_dump_vmcs_field(VMCS_VMWRITE_BITMAP_ADDRESS,
>                           "VMWRITE Bitmap Addr");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_EPT_VIOLATION_VE, 1)) {
>                       vmx_dump_vmcs_field(VMCS_VIRTUALIZATION_EXC_ADDRESS,
>                           "#VE Addr");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_XSAVES_XRSTORS, 1)) {
>                       vmx_dump_vmcs_field(VMCS_XSS_EXITING_BITMAP,
>                           "XSS exiting bitmap addr");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_ENCLS_EXITING, 1)) {
>                       vmx_dump_vmcs_field(VMCS_ENCLS_EXITING_BITMAP,
>                           "Encls exiting bitmap addr");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_TSC_SCALING, 1)) {
>                       vmx_dump_vmcs_field(VMCS_TSC_MULTIPLIER,
>                           "TSC scaling factor");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_EPT, 1)) {
>                       vmx_dump_vmcs_field(VMCS_GUEST_PHYSICAL_ADDRESS,
>                           "Guest PA");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>       }
>
>       vmx_dump_vmcs_field(VMCS_LINK_POINTER, "VMCS Link Pointer");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_DEBUGCTL, "Guest DEBUGCTL");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_ENTRY_CTLS,
>           IA32_VMX_LOAD_IA32_PAT_ON_ENTRY, 1) ||
> @@ -8033,7 +8111,7 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>           IA32_VMX_SAVE_IA32_PAT_ON_EXIT, 1)) {
>               vmx_dump_vmcs_field(VMCS_GUEST_IA32_PAT,
>                   "Guest PAT");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_ENTRY_CTLS,
> @@ -8042,27 +8120,27 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>           IA32_VMX_SAVE_IA32_EFER_ON_EXIT, 1)) {
>               vmx_dump_vmcs_field(VMCS_GUEST_IA32_EFER,
>                   "Guest EFER");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_ENTRY_CTLS,
>           IA32_VMX_LOAD_IA32_PERF_GLOBAL_CTRL_ON_ENTRY, 1)) {
>               vmx_dump_vmcs_field(VMCS_GUEST_IA32_PERF_GBL_CTRL,
>                   "Guest Perf Global Ctrl");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (has_sec) {
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_ENABLE_EPT, 1)) {
>                       vmx_dump_vmcs_field(VMCS_GUEST_PDPTE0, "Guest PDPTE0");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>                       vmx_dump_vmcs_field(VMCS_GUEST_PDPTE1, "Guest PDPTE1");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>                       vmx_dump_vmcs_field(VMCS_GUEST_PDPTE2, "Guest PDPTE2");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>                       vmx_dump_vmcs_field(VMCS_GUEST_PDPTE3, "Guest PDPTE3");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>       }
>
> @@ -8072,241 +8150,241 @@ vmx_dump_vmcs(struct vcpu *vcpu)
>           IA32_VMX_CLEAR_IA32_BNDCFGS_ON_EXIT, 1)) {
>               vmx_dump_vmcs_field(VMCS_GUEST_IA32_BNDCFGS,
>                   "Guest BNDCFGS");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_EXIT_CTLS,
>           IA32_VMX_LOAD_IA32_PAT_ON_EXIT, 1)) {
>               vmx_dump_vmcs_field(VMCS_HOST_IA32_PAT,
>                   "Host PAT");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_EXIT_CTLS,
>           IA32_VMX_LOAD_IA32_EFER_ON_EXIT, 1)) {
>               vmx_dump_vmcs_field(VMCS_HOST_IA32_EFER,
>                   "Host EFER");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_EXIT_CTLS,
>           IA32_VMX_LOAD_IA32_PERF_GLOBAL_CTRL_ON_EXIT, 1)) {
>               vmx_dump_vmcs_field(VMCS_HOST_IA32_PERF_GBL_CTRL,
>                   "Host Perf Global Ctrl");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       vmx_dump_vmcs_field(VMCS_PINBASED_CTLS, "Pinbased Ctrls");
>       vmx_dump_vmcs_field(VMCS_PROCBASED_CTLS, "Procbased Ctrls");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_EXCEPTION_BITMAP, "Exception Bitmap");
>       vmx_dump_vmcs_field(VMCS_PF_ERROR_CODE_MASK, "#PF Err Code Mask");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_PF_ERROR_CODE_MATCH, "#PF Err Code Match");
>       vmx_dump_vmcs_field(VMCS_CR3_TARGET_COUNT, "CR3 Tgt Count");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_EXIT_CTLS, "Exit Ctrls");
>       vmx_dump_vmcs_field(VMCS_EXIT_MSR_STORE_COUNT, "Exit MSR Store Ct");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_EXIT_MSR_LOAD_COUNT, "Exit MSR Load Ct");
>       vmx_dump_vmcs_field(VMCS_ENTRY_CTLS, "Entry Ctrls");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_ENTRY_MSR_LOAD_COUNT, "Entry MSR Load Ct");
>       vmx_dump_vmcs_field(VMCS_ENTRY_INTERRUPTION_INFO, "Entry Int. Info");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_ENTRY_EXCEPTION_ERROR_CODE,
>           "Entry Ex. Err Code");
>       vmx_dump_vmcs_field(VMCS_ENTRY_INSTRUCTION_LENGTH, "Entry Insn Len");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS,
>           IA32_VMX_USE_TPR_SHADOW, 1)) {
>               vmx_dump_vmcs_field(VMCS_TPR_THRESHOLD, "TPR Threshold");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       if (has_sec) {
>               vmx_dump_vmcs_field(VMCS_PROCBASED2_CTLS, "2ndary Ctrls");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>               if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS,
>                   IA32_VMX_PAUSE_LOOP_EXITING, 1)) {
>                       vmx_dump_vmcs_field(VMCS_PLE_GAP, "PLE Gap");
>                       vmx_dump_vmcs_field(VMCS_PLE_WINDOW, "PLE Window");
>               }
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       vmx_dump_vmcs_field(VMCS_INSTRUCTION_ERROR, "Insn Error");
>       vmx_dump_vmcs_field(VMCS_EXIT_REASON, "Exit Reason");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_EXIT_INTERRUPTION_INFO, "Exit Int. Info");
>       vmx_dump_vmcs_field(VMCS_EXIT_INTERRUPTION_ERR_CODE,
>           "Exit Int. Err Code");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_IDT_VECTORING_INFO, "IDT vect info");
>       vmx_dump_vmcs_field(VMCS_IDT_VECTORING_ERROR_CODE,
>           "IDT vect err code");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_INSTRUCTION_LENGTH, "Insn Len");
>       vmx_dump_vmcs_field(VMCS_EXIT_INSTRUCTION_INFO, "Exit Insn Info");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_ES_LIMIT, "G. ES Lim");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_CS_LIMIT, "G. CS Lim");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_SS_LIMIT, "G. SS Lim");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_DS_LIMIT, "G. DS Lim");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_FS_LIMIT, "G. FS Lim");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_GS_LIMIT, "G. GS Lim");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_LDTR_LIMIT, "G. LDTR Lim");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_TR_LIMIT, "G. TR Lim");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_GDTR_LIMIT, "G. GDTR Lim");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_IDTR_LIMIT, "G. IDTR Lim");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_ES_AR, "G. ES AR");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_CS_AR, "G. CS AR");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_SS_AR, "G. SS AR");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_DS_AR, "G. DS AR");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_FS_AR, "G. FS AR");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_GS_AR, "G. GS AR");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_LDTR_AR, "G. LDTR AR");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_TR_AR, "G. TR AR");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_INTERRUPTIBILITY_ST, "G. Int St.");
>       vmx_dump_vmcs_field(VMCS_GUEST_ACTIVITY_STATE, "G. Act St.");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_GUEST_SMBASE, "G. SMBASE");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_SYSENTER_CS, "G. SYSENTER CS");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PINBASED_CTLS,
>           IA32_VMX_ACTIVATE_VMX_PREEMPTION_TIMER, 1)) {
>               vmx_dump_vmcs_field(VMCS_VMX_PREEMPTION_TIMER_VAL,
>                   "VMX Preempt Timer");
> -             DPRINTF("\n");
> +             DEBUG_ADD("\n");
>       }
>
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_SYSENTER_CS, "H. SYSENTER CS");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_CR0_MASK, "CR0 Mask");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_CR4_MASK, "CR4 Mask");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       vmx_dump_vmcs_field(VMCS_CR0_READ_SHADOW, "CR0 RD Shadow");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_CR4_READ_SHADOW, "CR4 RD Shadow");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>
>       /* We assume all CPUs have the same max CR3 target ct */
>       cr3_tgt_ct = curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr3_tgt_count;
> -     DPRINTF("Max CR3 target count: 0x%x\n", cr3_tgt_ct);
> +     DEBUG_ADD("Max CR3 target count: 0x%x\n", cr3_tgt_ct);
>       if (cr3_tgt_ct <= VMX_MAX_CR3_TARGETS) {
>               for (i = 0 ; i < cr3_tgt_ct; i++) {
>                       vmx_dump_vmcs_field(VMCS_CR3_TARGET_0 + (2 * i),
>                           "CR3 Target");
> -                     DPRINTF("\n");
> +                     DEBUG_ADD("\n");
>               }
>       } else {
> -             DPRINTF("(Bogus CR3 Target Count > %d", VMX_MAX_CR3_TARGETS);
> +             DEBUG_ADD("(Bogus CR3 Target Count > %d", VMX_MAX_CR3_TARGETS);
>       }
>
>       vmx_dump_vmcs_field(VMCS_GUEST_EXIT_QUALIFICATION, "G. Exit Qual");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_IO_RCX, "I/O RCX");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_IO_RSI, "I/O RSI");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_IO_RDI, "I/O RDI");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_IO_RIP, "I/O RIP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_LINEAR_ADDRESS, "G. Lin Addr");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_CR0, "G. CR0");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_CR3, "G. CR3");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_CR4, "G. CR4");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_ES_BASE, "G. ES Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_CS_BASE, "G. CS Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_SS_BASE, "G. SS Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_DS_BASE, "G. DS Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_FS_BASE, "G. FS Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_GS_BASE, "G. GS Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_LDTR_BASE, "G. LDTR Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_TR_BASE, "G. TR Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_GDTR_BASE, "G. GDTR Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_IDTR_BASE, "G. IDTR Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_DR7, "G. DR7");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_RSP, "G. RSP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_RIP, "G. RIP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_RFLAGS, "G. RFLAGS");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_PENDING_DBG_EXC, "G. Pend Dbg Exc");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_SYSENTER_ESP, "G. SYSENTER ESP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_GUEST_IA32_SYSENTER_EIP, "G. SYSENTER EIP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_CR0, "H. CR0");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_CR3, "H. CR3");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_CR4, "H. CR4");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_FS_BASE, "H. FS Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_GS_BASE, "H. GS Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_TR_BASE, "H. TR Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_GDTR_BASE, "H. GDTR Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_IDTR_BASE, "H. IDTR Base");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_SYSENTER_ESP, "H. SYSENTER ESP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_SYSENTER_EIP, "H. SYSENTER EIP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_RSP, "H. RSP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>       vmx_dump_vmcs_field(VMCS_HOST_IA32_RIP, "H. RIP");
> -     DPRINTF("\n");
> +     DEBUG_ADD("\n");
>  }
>
>  /*
> @@ -8329,307 +8407,307 @@ vmx_vcpu_dump_regs(struct vcpu *vcpu)
>       DPRINTF("vcpu @ %p in %s mode\n", vcpu, vmm_decode_cpu_mode(vcpu));
>       i = vmm_get_guest_cpu_cpl(vcpu);
>       if (i == -1)
> -             DPRINTF(" CPL=unknown\n");
> +             DEBUG_ADD(" CPL=unknown\n");
>       else
> -             DPRINTF(" CPL=%d\n", i);
> -     DPRINTF(" rax=0x%016llx rbx=0x%016llx rcx=0x%016llx\n",
> +             DEBUG_ADD(" CPL=%d\n", i);
> +     DEBUG_ADD(" rax=0x%016llx rbx=0x%016llx rcx=0x%016llx\n",
>           vcpu->vc_gueststate.vg_rax, vcpu->vc_gueststate.vg_rbx,
>           vcpu->vc_gueststate.vg_rcx);
> -     DPRINTF(" rdx=0x%016llx rbp=0x%016llx rdi=0x%016llx\n",
> +     DEBUG_ADD(" rdx=0x%016llx rbp=0x%016llx rdi=0x%016llx\n",
>           vcpu->vc_gueststate.vg_rdx, vcpu->vc_gueststate.vg_rbp,
>           vcpu->vc_gueststate.vg_rdi);
> -     DPRINTF(" rsi=0x%016llx  r8=0x%016llx  r9=0x%016llx\n",
> +     DEBUG_ADD(" rsi=0x%016llx  r8=0x%016llx  r9=0x%016llx\n",
>           vcpu->vc_gueststate.vg_rsi, vcpu->vc_gueststate.vg_r8,
>           vcpu->vc_gueststate.vg_r9);
> -     DPRINTF(" r10=0x%016llx r11=0x%016llx r12=0x%016llx\n",
> +     DEBUG_ADD(" r10=0x%016llx r11=0x%016llx r12=0x%016llx\n",
>           vcpu->vc_gueststate.vg_r10, vcpu->vc_gueststate.vg_r11,
>           vcpu->vc_gueststate.vg_r12);
> -     DPRINTF(" r13=0x%016llx r14=0x%016llx r15=0x%016llx\n",
> +     DEBUG_ADD(" r13=0x%016llx r14=0x%016llx r15=0x%016llx\n",
>           vcpu->vc_gueststate.vg_r13, vcpu->vc_gueststate.vg_r14,
>           vcpu->vc_gueststate.vg_r15);
>
> -     DPRINTF(" rip=0x%016llx rsp=", vcpu->vc_gueststate.vg_rip);
> +     DEBUG_ADD(" rip=0x%016llx rsp=", vcpu->vc_gueststate.vg_rip);
>       if (vmread(VMCS_GUEST_IA32_RSP, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else
> -             DPRINTF("0x%016llx\n", r);
> +             DEBUG_ADD("0x%016llx\n", r);
>
> -     DPRINTF(" rflags=");
> +     DEBUG_ADD(" rflags=");
>       if (vmread(VMCS_GUEST_IA32_RFLAGS, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%016llx ", r);
> +             DEBUG_ADD("0x%016llx ", r);
>               vmm_decode_rflags(r);
>       }
>
> -     DPRINTF(" cr0=");
> +     DEBUG_ADD(" cr0=");
>       if (vmread(VMCS_GUEST_IA32_CR0, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%016llx ", r);
> +             DEBUG_ADD("0x%016llx ", r);
>               vmm_decode_cr0(r);
>       }
>
> -     DPRINTF(" cr2=0x%016llx\n", vcpu->vc_gueststate.vg_cr2);
> +     DEBUG_ADD(" cr2=0x%016llx\n", vcpu->vc_gueststate.vg_cr2);
>
> -     DPRINTF(" cr3=");
> +     DEBUG_ADD(" cr3=");
>       if (vmread(VMCS_GUEST_IA32_CR3, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%016llx ", r);
> +             DEBUG_ADD("0x%016llx ", r);
>               vmm_decode_cr3(r);
>       }
>
> -     DPRINTF(" cr4=");
> +     DEBUG_ADD(" cr4=");
>       if (vmread(VMCS_GUEST_IA32_CR4, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%016llx ", r);
> +             DEBUG_ADD("0x%016llx ", r);
>               vmm_decode_cr4(r);
>       }
>
> -     DPRINTF(" --Guest Segment Info--\n");
> +     DEBUG_ADD(" --Guest Segment Info--\n");
>
> -     DPRINTF(" cs=");
> +     DEBUG_ADD(" cs=");
>       if (vmread(VMCS_GUEST_IA32_CS_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx rpl=%lld", r, r & 0x3);
> +             DEBUG_ADD("0x%04llx rpl=%lld", r, r & 0x3);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_CS_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_CS_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_CS_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" ds=");
> +     DEBUG_ADD(" ds=");
>       if (vmread(VMCS_GUEST_IA32_DS_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx rpl=%lld", r, r & 0x3);
> +             DEBUG_ADD("0x%04llx rpl=%lld", r, r & 0x3);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_DS_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_DS_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_DS_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" es=");
> +     DEBUG_ADD(" es=");
>       if (vmread(VMCS_GUEST_IA32_ES_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx rpl=%lld", r, r & 0x3);
> +             DEBUG_ADD("0x%04llx rpl=%lld", r, r & 0x3);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_ES_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_ES_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_ES_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" fs=");
> +     DEBUG_ADD(" fs=");
>       if (vmread(VMCS_GUEST_IA32_FS_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx rpl=%lld", r, r & 0x3);
> +             DEBUG_ADD("0x%04llx rpl=%lld", r, r & 0x3);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_FS_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_FS_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_FS_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" gs=");
> +     DEBUG_ADD(" gs=");
>       if (vmread(VMCS_GUEST_IA32_GS_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx rpl=%lld", r, r & 0x3);
> +             DEBUG_ADD("0x%04llx rpl=%lld", r, r & 0x3);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_GS_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_GS_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_GS_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" ss=");
> +     DEBUG_ADD(" ss=");
>       if (vmread(VMCS_GUEST_IA32_SS_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx rpl=%lld", r, r & 0x3);
> +             DEBUG_ADD("0x%04llx rpl=%lld", r, r & 0x3);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_SS_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_SS_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_SS_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" tr=");
> +     DEBUG_ADD(" tr=");
>       if (vmread(VMCS_GUEST_IA32_TR_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx", r);
> +             DEBUG_ADD("0x%04llx", r);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_TR_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_TR_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_TR_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" gdtr base=");
> +     DEBUG_ADD(" gdtr base=");
>       if (vmread(VMCS_GUEST_IA32_GDTR_BASE, &r))
> -             DPRINTF("(error reading)   ");
> +             DEBUG_ADD("(error reading)   ");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_GDTR_LIMIT, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else
> -             DPRINTF("0x%016llx\n", r);
> +             DEBUG_ADD("0x%016llx\n", r);
>
> -     DPRINTF(" idtr base=");
> +     DEBUG_ADD(" idtr base=");
>       if (vmread(VMCS_GUEST_IA32_IDTR_BASE, &r))
> -             DPRINTF("(error reading)   ");
> +             DEBUG_ADD("(error reading)   ");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_IDTR_LIMIT, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else
> -             DPRINTF("0x%016llx\n", r);
> +             DEBUG_ADD("0x%016llx\n", r);
>
> -     DPRINTF(" ldtr=");
> +     DEBUG_ADD(" ldtr=");
>       if (vmread(VMCS_GUEST_IA32_LDTR_SEL, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%04llx", r);
> +             DEBUG_ADD("0x%04llx", r);
>
> -     DPRINTF(" base=");
> +     DEBUG_ADD(" base=");
>       if (vmread(VMCS_GUEST_IA32_LDTR_BASE, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" limit=");
> +     DEBUG_ADD(" limit=");
>       if (vmread(VMCS_GUEST_IA32_LDTR_LIMIT, &r))
> -             DPRINTF("(error reading)");
> +             DEBUG_ADD("(error reading)");
>       else
> -             DPRINTF("0x%016llx", r);
> +             DEBUG_ADD("0x%016llx", r);
>
> -     DPRINTF(" a/r=");
> +     DEBUG_ADD(" a/r=");
>       if (vmread(VMCS_GUEST_IA32_LDTR_AR, &r))
> -             DPRINTF("(error reading)\n");
> +             DEBUG_ADD("(error reading)\n");
>       else {
> -             DPRINTF("0x%04llx\n  ", r);
> +             DEBUG_ADD("0x%04llx\n  ", r);
>               vmm_segment_desc_decode(r);
>       }
>
> -     DPRINTF(" --Guest MSRs @ 0x%016llx (paddr: 0x%016llx)--\n",
> +     DEBUG_ADD(" --Guest MSRs @ 0x%016llx (paddr: 0x%016llx)--\n",
>           (uint64_t)vcpu->vc_vmx_msr_exit_save_va,
>           (uint64_t)vcpu->vc_vmx_msr_exit_save_pa);
>
>       msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va;
>
>       for (i = 0; i < VMX_NUM_MSR_STORE; i++) {
> -             DPRINTF("  MSR %d @ %p : 0x%08llx (%s), "
> +             DEBUG_ADD("  MSR %d @ %p : 0x%08llx (%s), "
>                   "value=0x%016llx ",
>                   i, &msr_store[i], msr_store[i].vms_index,
>                   msr_name_decode(msr_store[i].vms_index),
> @@ -8722,42 +8800,42 @@ vmm_segment_desc_decode(uint64_t val)
>       DPRINTF("granularity=%d dib=%d l(64 bit)=%d present=%d sys=%d ",
>           g, dib, l, p, s);
>
> -     DPRINTF("type=");
> +     DEBUG_ADD("type=");
>       if (!s) {
>               switch (type) {
> -             case SDT_SYSLDT: DPRINTF("ldt\n"); break;
> -             case SDT_SYS386TSS: DPRINTF("tss (available)\n"); break;
> -             case SDT_SYS386BSY: DPRINTF("tss (busy)\n"); break;
> -             case SDT_SYS386CGT: DPRINTF("call gate\n"); break;
> -             case SDT_SYS386IGT: DPRINTF("interrupt gate\n"); break;
> -             case SDT_SYS386TGT: DPRINTF("trap gate\n"); break;
> +             case SDT_SYSLDT: DEBUG_ADD("ldt\n"); break;
> +             case SDT_SYS386TSS: DEBUG_ADD("tss (available)\n"); break;
> +             case SDT_SYS386BSY: DEBUG_ADD("tss (busy)\n"); break;
> +             case SDT_SYS386CGT: DEBUG_ADD("call gate\n"); break;
> +             case SDT_SYS386IGT: DEBUG_ADD("interrupt gate\n"); break;
> +             case SDT_SYS386TGT: DEBUG_ADD("trap gate\n"); break;
>               /* XXX handle 32 bit segment types by inspecting mode */
> -             default: DPRINTF("unknown");
> +             default: DEBUG_ADD("unknown");
>               }
>       } else {
>               switch (type + 16) {
> -             case SDT_MEMRO: DPRINTF("data, r/o\n"); break;
> -             case SDT_MEMROA: DPRINTF("data, r/o, accessed\n"); break;
> -             case SDT_MEMRW: DPRINTF("data, r/w\n"); break;
> -             case SDT_MEMRWA: DPRINTF("data, r/w, accessed\n"); break;
> -             case SDT_MEMROD: DPRINTF("data, r/o, expand down\n"); break;
> -             case SDT_MEMRODA: DPRINTF("data, r/o, expand down, "
> +             case SDT_MEMRO: DEBUG_ADD("data, r/o\n"); break;
> +             case SDT_MEMROA: DEBUG_ADD("data, r/o, accessed\n"); break;
> +             case SDT_MEMRW: DEBUG_ADD("data, r/w\n"); break;
> +             case SDT_MEMRWA: DEBUG_ADD("data, r/w, accessed\n"); break;
> +             case SDT_MEMROD: DEBUG_ADD("data, r/o, expand down\n"); break;
> +             case SDT_MEMRODA: DEBUG_ADD("data, r/o, expand down, "
>                   "accessed\n");
>                       break;
> -             case SDT_MEMRWD: DPRINTF("data, r/w, expand down\n"); break;
> -             case SDT_MEMRWDA: DPRINTF("data, r/w, expand down, "
> +             case SDT_MEMRWD: DEBUG_ADD("data, r/w, expand down\n"); break;
> +             case SDT_MEMRWDA: DEBUG_ADD("data, r/w, expand down, "
>                   "accessed\n");
>                       break;
> -             case SDT_MEME: DPRINTF("code, x only\n"); break;
> -             case SDT_MEMEA: DPRINTF("code, x only, accessed\n");
> -             case SDT_MEMER: DPRINTF("code, r/x\n"); break;
> -             case SDT_MEMERA: DPRINTF("code, r/x, accessed\n"); break;
> -             case SDT_MEMEC: DPRINTF("code, x only, conforming\n"); break;
> -             case SDT_MEMEAC: DPRINTF("code, x only, conforming, "
> +             case SDT_MEME: DEBUG_ADD("code, x only\n"); break;
> +             case SDT_MEMEA: DEBUG_ADD("code, x only, accessed\n");
> +             case SDT_MEMER: DEBUG_ADD("code, r/x\n"); break;
> +             case SDT_MEMERA: DEBUG_ADD("code, r/x, accessed\n"); break;
> +             case SDT_MEMEC: DEBUG_ADD("code, x only, conforming\n"); break;
> +             case SDT_MEMEAC: DEBUG_ADD("code, x only, conforming, "
>                   "accessed\n");
>                       break;
> -             case SDT_MEMERC: DPRINTF("code, r/x, conforming\n"); break;
> -             case SDT_MEMERAC: DPRINTF("code, r/x, conforming, accessed\n");
> +             case SDT_MEMERC: DEBUG_ADD("code, r/x, conforming\n"); break;
> +             case SDT_MEMERAC: DEBUG_ADD("code, r/x, conforming, accessed\n");
>                       break;
>               }
>       }
> @@ -8782,14 +8860,14 @@ vmm_decode_cr0(uint64_t cr0)
>
>       uint8_t i;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(cr0_info); i++)
>               if (cr0 & cr0_info[i].vrdi_bit)
> -                     DPRINTF("%s", cr0_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", cr0_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", cr0_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", cr0_info[i].vrdi_absent);
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
> @@ -8804,22 +8882,22 @@ vmm_decode_cr3(uint64_t cr3)
>       uint8_t i;
>
>       if (vmread(VMCS_GUEST_IA32_CR4, &cr4)) {
> -             DPRINTF("(error)\n");
> +             DEBUG_ADD("(error)\n");
>               return;
>       }
>
>       /* If CR4.PCIDE = 0, interpret CR3.PWT and CR3.PCD */
>       if ((cr4 & CR4_PCIDE) == 0) {
> -             DPRINTF("(");
> +             DEBUG_ADD("(");
>               for (i = 0 ; i < nitems(cr3_info) ; i++)
>                       if (cr3 & cr3_info[i].vrdi_bit)
> -                             DPRINTF("%s", cr3_info[i].vrdi_present);
> +                             DEBUG_ADD("%s", cr3_info[i].vrdi_present);
>                       else
> -                             DPRINTF("%s", cr3_info[i].vrdi_absent);
> +                             DEBUG_ADD("%s", cr3_info[i].vrdi_absent);
>
> -             DPRINTF(")\n");
> +             DEBUG_ADD(")\n");
>       } else {
> -             DPRINTF("(pcid=0x%llx)\n", cr3 & 0xFFF);
> +             DEBUG_ADD("(pcid=0x%llx)\n", cr3 & 0xFFF);
>       }
>  }
>
> @@ -8850,14 +8928,14 @@ vmm_decode_cr4(uint64_t cr4)
>
>       uint8_t i;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(cr4_info); i++)
>               if (cr4 & cr4_info[i].vrdi_bit)
> -                     DPRINTF("%s", cr4_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", cr4_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", cr4_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", cr4_info[i].vrdi_absent);
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
> @@ -8871,14 +8949,14 @@ vmm_decode_apicbase_msr_value(uint64_t apicbase)
>
>       uint8_t i;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(apicbase_info); i++)
>               if (apicbase & apicbase_info[i].vrdi_bit)
> -                     DPRINTF("%s", apicbase_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", apicbase_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", apicbase_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", apicbase_info[i].vrdi_absent);
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
> @@ -8893,18 +8971,18 @@ vmm_decode_ia32_fc_value(uint64_t fcr)
>
>       uint8_t i;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(fcr_info); i++)
>               if (fcr & fcr_info[i].vrdi_bit)
> -                     DPRINTF("%s", fcr_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", fcr_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", fcr_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", fcr_info[i].vrdi_absent);
>
>       if (fcr & IA32_FEATURE_CONTROL_SENTER_EN)
> -             DPRINTF(" [SENTER param = 0x%llx]",
> +             DEBUG_ADD(" [SENTER param = 0x%llx]",
>                   (fcr & IA32_FEATURE_CONTROL_SENTER_PARAM_MASK) >> 8);
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
> @@ -8918,30 +8996,30 @@ vmm_decode_mtrrcap_value(uint64_t val)
>
>       uint8_t i;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(mtrrcap_info); i++)
>               if (val & mtrrcap_info[i].vrdi_bit)
> -                     DPRINTF("%s", mtrrcap_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", mtrrcap_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", mtrrcap_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", mtrrcap_info[i].vrdi_absent);
>
>       if (val & MTRRcap_FIXED)
> -             DPRINTF(" [nr fixed ranges = 0x%llx]",
> +             DEBUG_ADD(" [nr fixed ranges = 0x%llx]",
>                   (val & 0xff));
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
>  vmm_decode_perf_status_value(uint64_t val)
>  {
> -     DPRINTF("(pstate ratio = 0x%llx)\n", (val & 0xffff));
> +     DEBUG_ADD("(pstate ratio = 0x%llx)\n", (val & 0xffff));
>  }
>
>  void vmm_decode_perf_ctl_value(uint64_t val)
>  {
> -     DPRINTF("(%s ", (val & PERF_CTL_TURBO) ? "TURBO" : "turbo");
> -     DPRINTF("pstate req = 0x%llx)\n", (val & 0xfffF));
> +     DEBUG_ADD("(%s ", (val & PERF_CTL_TURBO) ? "TURBO" : "turbo");
> +     DEBUG_ADD("pstate req = 0x%llx)\n", (val & 0xfffF));
>  }
>
>  void
> @@ -8955,28 +9033,28 @@ vmm_decode_mtrrdeftype_value(uint64_t mtrrdeftype)
>       uint8_t i;
>       int type;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(mtrrdeftype_info); i++)
>               if (mtrrdeftype & mtrrdeftype_info[i].vrdi_bit)
> -                     DPRINTF("%s", mtrrdeftype_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", mtrrdeftype_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", mtrrdeftype_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", mtrrdeftype_info[i].vrdi_absent);
>
> -     DPRINTF("type = ");
> +     DEBUG_ADD("type = ");
>       type = mtrr2mrt(mtrrdeftype & 0xff);
>       switch (type) {
> -     case MDF_UNCACHEABLE: DPRINTF("UC"); break;
> -     case MDF_WRITECOMBINE: DPRINTF("WC"); break;
> -     case MDF_WRITETHROUGH: DPRINTF("WT"); break;
> -     case MDF_WRITEPROTECT: DPRINTF("RO"); break;
> -     case MDF_WRITEBACK: DPRINTF("WB"); break;
> +     case MDF_UNCACHEABLE: DEBUG_ADD("UC"); break;
> +     case MDF_WRITECOMBINE: DEBUG_ADD("WC"); break;
> +     case MDF_WRITETHROUGH: DEBUG_ADD("WT"); break;
> +     case MDF_WRITEPROTECT: DEBUG_ADD("RO"); break;
> +     case MDF_WRITEBACK: DEBUG_ADD("WB"); break;
>       case MDF_UNKNOWN:
>       default:
> -             DPRINTF("??");
> +             DEBUG_ADD("??");
>               break;
>       }
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
> @@ -8991,14 +9069,14 @@ vmm_decode_efer_value(uint64_t efer)
>
>       uint8_t i;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(efer_info); i++)
>               if (efer & efer_info[i].vrdi_bit)
> -                     DPRINTF("%s", efer_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", efer_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", efer_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", efer_info[i].vrdi_absent);
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
> @@ -9013,7 +9091,7 @@ vmm_decode_msr_value(uint64_t msr, uint64_t val)
>       case MSR_MTRRdefType: vmm_decode_mtrrdeftype_value(val); break;
>       case MSR_EFER: vmm_decode_efer_value(val); break;
>       case MSR_MISC_ENABLE: vmm_decode_misc_enable_value(val); break;
> -     default: DPRINTF("\n");
> +     default: DEBUG_ADD("\n");
>       }
>  }
>
> @@ -9041,17 +9119,17 @@ vmm_decode_rflags(uint64_t rflags)
>
>       uint8_t i, iopl;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(rflags_info); i++)
>               if (rflags & rflags_info[i].vrdi_bit)
> -                     DPRINTF("%s", rflags_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", rflags_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", rflags_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", rflags_info[i].vrdi_absent);
>
>       iopl = (rflags & PSL_IOPL) >> 12;
> -     DPRINTF("IOPL=%d", iopl);
> +     DEBUG_ADD("IOPL=%d", iopl);
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  void
> @@ -9072,14 +9150,14 @@ vmm_decode_misc_enable_value(uint64_t misc)
>
>       uint8_t i;
>
> -     DPRINTF("(");
> +     DEBUG_ADD("(");
>       for (i = 0; i < nitems(misc_info); i++)
>               if (misc & misc_info[i].vrdi_bit)
> -                     DPRINTF("%s", misc_info[i].vrdi_present);
> +                     DEBUG_ADD("%s", misc_info[i].vrdi_present);
>               else
> -                     DPRINTF("%s", misc_info[i].vrdi_absent);
> +                     DEBUG_ADD("%s", misc_info[i].vrdi_absent);
>
> -     DPRINTF(")\n");
> +     DEBUG_ADD(")\n");
>  }
>
>  const char *
> blob - 4bc8627b554e1233ed55f23df30c8c2052fa84b2
> blob + 357fc099c307aaa7ee075243847e791188ad615a
> --- sys/arch/amd64/include/cpu.h
> +++ sys/arch/amd64/include/cpu.h
> @@ -49,7 +49,7 @@
>  #endif /* _KERNEL */
>
>  #include <sys/device.h>
> -#include <sys/rwlock.h>
> +#include <sys/mutex.h>
>  #include <sys/sched.h>
>  #include <sys/sensors.h>
>  #include <sys/srp.h>
> @@ -214,7 +214,7 @@ struct cpu_info {
>       char            ci_panicbuf[512];
>
>       paddr_t         ci_vmcs_pa;
> -     struct rwlock   ci_vmcs_lock;
> +     struct mutex    ci_vmcs_mtx;
>  };
>
>  #define CPUF_BSP     0x0001          /* CPU is the original BSP */
> blob - 94bb172832d4c2847b1e83ebb9cc05538db6ac80
> blob + dc8eb4ca62dbe3a0c01a3d37f298b250bd8daac2
> --- sys/arch/amd64/include/vmmvar.h
> +++ sys/arch/amd64/include/vmmvar.h
> @@ -945,6 +945,7 @@ struct vcpu {
>       uint64_t vc_shadow_pat;
>
>       /* VMX only */
> +     struct mutex vc_vmx_mtx;
>       uint64_t vc_vmx_basic;
>       uint64_t vc_vmx_entry_ctls;
>       uint64_t vc_vmx_true_entry_ctls;
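
Side note for anyone eyeballing the DPRINTF -> DEBUG_ADD churn above without
applying the diff: the idea is that a dump routine opens a line with the
log(9)-backed macro and then appends the remaining fragments with the
addlog(9)-backed one, so a multi-part field still comes out as one line in
the message buffer. A rough sketch of the pattern follows; the function name
and field below are invented for illustration and are not part of the diff:

    #include <sys/param.h>
    #include <sys/syslog.h>

    /*
     * Illustrative only (not in the diff): print one named field as a
     * single LOG_DEBUG line by opening it with log(9) and appending the
     * value and newline with addlog(9).
     */
    void
    example_dump_field(const char *name, uint64_t val)
    {
            log(LOG_DEBUG, " %s=", name);   /* opens the debug line */
            addlog("0x%016llx", val);       /* appends the value */
            addlog("\n");                   /* terminates the line */
    }

Whether the LOG_DEBUG lines make it into /var/log/messages depends on your
syslog.conf; they should also land in the kernel message buffer, so dmesg is
the quickest place to look when testing with VMM_DEBUG.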


--
-Dave Voutila
