On Thu, May 04, 2017 at 11:17:12PM -0700, Mike Larkin wrote: > This diff limits the ASID/VPID value to 0xFFF (4095), or in the case of SVM, > the max ASID capability of the CPU. I use a bitmap to record the VPIDs/ASIDs > in use, and allocate the next one available when needed. Although VMX can > support 65535 VPIDs, 4095 seems like a reasonable value for the number of > "VCPUs currently in use" for a given machine. The bitmap is easily extended > if needed. > > Tested with VM startup, teardown, watching the VPID recycling when VMM_DEBUG > is enabled. > > ok? > > -ml
Just noticed identcpu.c got omitted from the previous diff. -ml Index: arch/amd64/amd64/identcpu.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/identcpu.c,v retrieving revision 1.83 diff -u -p -a -u -r1.83 identcpu.c --- arch/amd64/amd64/identcpu.c 14 Apr 2017 01:02:28 -0000 1.83 +++ arch/amd64/amd64/identcpu.c 5 May 2017 06:25:57 -0000 @@ -949,8 +949,8 @@ cpu_check_vmm_cap(struct cpu_info *ci) CPUID(0x8000000A, dummy, ci->ci_vmm_cap.vcc_svm.svm_max_asid, dummy, dummy); - if (ci->ci_vmm_cap.vcc_svm.svm_max_asid > 0xFFFF) - ci->ci_vmm_cap.vcc_svm.svm_max_asid = 0xFFFF; + if (ci->ci_vmm_cap.vcc_svm.svm_max_asid > 0xFFF) + ci->ci_vmm_cap.vcc_svm.svm_max_asid = 0xFFF; } /* Index: arch/amd64/amd64/vmm.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/vmm.c,v retrieving revision 1.138 diff -u -p -a -u -r1.138 vmm.c --- arch/amd64/amd64/vmm.c 2 May 2017 02:57:46 -0000 1.138 +++ arch/amd64/amd64/vmm.c 5 May 2017 06:12:38 -0000 @@ -100,6 +100,10 @@ struct vmm_softc { struct rwlock vm_lock; size_t vm_ct; /* number of in-memory VMs */ size_t vm_idx; /* next unique VM index */ + + struct rwlock vpid_lock; + uint16_t max_vpid; + uint8_t vpids[512]; /* bitmap of used VPID/ASIDs */ }; int vmm_enabled(void); @@ -165,6 +169,8 @@ int svm_get_guest_faulttype(void); int vmx_get_exit_qualification(uint64_t *); int vmx_fault_page(struct vcpu *, paddr_t); int vmx_handle_np_fault(struct vcpu *); +int vmm_alloc_vpid(uint16_t *); +void vmm_free_vpid(uint16_t); const char *vcpu_state_decode(u_int); const char *vmx_exit_reason_decode(uint32_t); const char *vmx_instruction_error_decode(uint32_t); @@ -361,6 +367,15 @@ vmm_attach(struct device *parent, struct sc->mode = VMM_MODE_UNKNOWN; } + if (sc->mode == VMM_MODE_SVM || sc->mode == VMM_MODE_RVI) { + sc->max_vpid = ci->ci_vmm_cap.vcc_svm.svm_max_asid; + } else { + sc->max_vpid = 0xFFF; + } + + bzero(&sc->vpids, 
sizeof(sc->vpids)); + rw_init(&sc->vpid_lock, "vpidlock"); + pool_init(&vm_pool, sizeof(struct vm), 0, IPL_NONE, PR_WAITOK, "vmpool", NULL); pool_init(&vcpu_pool, sizeof(struct vcpu), 64, IPL_NONE, PR_WAITOK, @@ -1033,10 +1048,6 @@ vm_create(struct vm_create_params *vcp, vmm_softc->vm_ct++; vmm_softc->vm_idx++; - /* - * XXX we use the vm_id for the VPID/ASID, so we need to prevent - * wrapping around 65536/4096 entries here - */ vm->vm_id = vmm_softc->vm_idx; vm->vm_vcpu_ct = 0; vm->vm_vcpus_running = 0; @@ -1671,6 +1682,7 @@ vcpu_reset_regs_svm(struct vcpu *vcpu, s { struct vmcb *vmcb; int ret; + uint16_t asid; vmcb = (struct vmcb *)vcpu->vc_control_va; @@ -1726,7 +1738,14 @@ vcpu_reset_regs_svm(struct vcpu *vcpu, s svm_setmsrbr(vcpu, MSR_EFER); /* Guest VCPU ASID */ - vmcb->v_asid = vcpu->vc_parent->vm_id; + if (vmm_alloc_vpid(&asid)) { + DPRINTF("%s: could not allocate asid\n", __func__); + ret = EINVAL; + goto exit; + } + + vmcb->v_asid = asid; + vcpu->vc_vpid = asid; /* TLB Control */ vmcb->v_tlb_control = 2; /* Flush this guest's TLB entries */ @@ -1745,6 +1764,7 @@ vcpu_reset_regs_svm(struct vcpu *vcpu, s vmcb->v_efer |= (EFER_LME | EFER_LMA); vmcb->v_cr4 |= CR4_PAE; +exit: return ret; } @@ -1947,7 +1967,7 @@ vcpu_reset_regs_vmx(struct vcpu *vcpu, s uint32_t pinbased, procbased, procbased2, exit, entry; uint32_t want1, want0; uint64_t msr, ctrlval, eptp, cr3; - uint16_t ctrl; + uint16_t ctrl, vpid; struct vmx_msr_store *msr_store; ret = 0; @@ -2203,12 +2223,20 @@ vcpu_reset_regs_vmx(struct vcpu *vcpu, s if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS, IA32_VMX_ACTIVATE_SECONDARY_CONTROLS, 1)) { if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS, - IA32_VMX_ENABLE_VPID, 1)) - if (vmwrite(VMCS_GUEST_VPID, - (uint16_t)vcpu->vc_parent->vm_id)) { + IA32_VMX_ENABLE_VPID, 1)) { + if (vmm_alloc_vpid(&vpid)) { + DPRINTF("%s: could not allocate VPID\n", + __func__); + ret = EINVAL; + goto exit; + } + if (vmwrite(VMCS_GUEST_VPID, vpid)) { ret = EINVAL; goto 
exit; } + + vcpu->vc_vpid = vpid; + } } /* @@ -2769,6 +2797,7 @@ vcpu_init(struct vcpu *vcpu) vcpu->vc_virt_mode = vmm_softc->mode; vcpu->vc_state = VCPU_STATE_STOPPED; + vcpu->vc_vpid = 0; if (vmm_softc->mode == VMM_MODE_VMX || vmm_softc->mode == VMM_MODE_EPT) ret = vcpu_init_vmx(vcpu); @@ -2804,6 +2833,9 @@ vcpu_deinit_vmx(struct vcpu *vcpu) if (vcpu->vc_vmx_msr_entry_load_va) km_free((void *)vcpu->vc_vmx_msr_entry_load_va, PAGE_SIZE, &kv_page, &kp_zero); + + if (vcpu->vc_vmx_vpid_enabled) + vmm_free_vpid(vcpu->vc_vpid); } /* @@ -2829,6 +2861,8 @@ vcpu_deinit_svm(struct vcpu *vcpu) if (vcpu->vc_svm_ioio_va) km_free((void *)vcpu->vc_svm_ioio_va, 3 * PAGE_SIZE, &kv_any, &vmm_kp_contig); + + vmm_free_vpid(vcpu->vc_vpid); } /* @@ -4940,6 +4974,72 @@ vcpu_run_svm(struct vcpu *vcpu, struct v { /* XXX removed due to rot */ return (0); +} + +/* + * vmm_alloc_vpid + * + * Sets the memory location pointed to by "vpid" to the next available VPID + * or ASID. + * + * Parameters: + * vpid: Pointer to location to receive the next VPID/ASID + * + * Return Values: + * 0: The operation completed successfully + * ENOMEM: No VPIDs/ASIDs were available. Content of 'vpid' is unchanged. + */ +int +vmm_alloc_vpid(uint16_t *vpid) +{ + uint16_t i; + uint8_t idx, bit; + struct vmm_softc *sc = vmm_softc; + + rw_enter_write(&vmm_softc->vpid_lock); + for (i = 1; i <= sc->max_vpid; i++) { + idx = i / 8; + bit = i - (idx * 8); + + if (!(sc->vpids[idx] & (1 << bit))) { + sc->vpids[idx] |= (1 << bit); + *vpid = i; + DPRINTF("%s: allocated VPID/ASID %d\n", __func__, + i); + rw_exit_write(&vmm_softc->vpid_lock); + return 0; + } + } + + printf("%s: no available %ss\n", __func__, + (sc->mode == VMM_MODE_EPT || sc->mode == VMM_MODE_VMX) ? "VPID" : + "ASID"); + + rw_exit_write(&vmm_softc->vpid_lock); + return ENOMEM; +} + +/* + * vmm_free_vpid + * + * Frees the VPID/ASID id supplied in "vpid". + * + * Parameters: + * vpid: VPID/ASID to free. 
+ */ +void +vmm_free_vpid(uint16_t vpid) +{ + uint8_t idx, bit; + struct vmm_softc *sc = vmm_softc; + + rw_enter_write(&vmm_softc->vpid_lock); + idx = vpid / 8; + bit = vpid - (idx * 8); + sc->vpids[idx] &= ~(1 << bit); + + DPRINTF("%s: freed VPID/ASID %d\n", __func__, vpid); + rw_exit_write(&vmm_softc->vpid_lock); } /* Index: arch/amd64/include/vmmvar.h =================================================================== RCS file: /cvs/src/sys/arch/amd64/include/vmmvar.h,v retrieving revision 1.36 diff -u -p -a -u -r1.36 vmmvar.h --- arch/amd64/include/vmmvar.h 2 May 2017 02:57:46 -0000 1.36 +++ arch/amd64/include/vmmvar.h 5 May 2017 03:42:14 -0000 @@ -681,6 +681,7 @@ struct vcpu { struct vm *vc_parent; uint32_t vc_id; + uint16_t vc_vpid; u_int vc_state; SLIST_ENTRY(vcpu) vc_vcpu_link;