On Wed, May 18, 2022 at 10:27:11AM -0400, Dave Voutila wrote:
>
> ping...would like to get this in if possible so I can move on to
> fixing some things in vmm.
>

sorry. ok mlarkin

> Dave Voutila <d...@sisu.io> writes:
>
> > tech@,
> >
> > Continuing my vmm/vmd bug hunt, the following diff adapts
> > vcpu_readregs_vmx to optionally load the vmcs on the current cpu.
> > The bug has gone unnoticed because the ioctl isn't exercised in
> > typical vmd usage, and the call to vcpu_readregs_vmx in the run
> > ioctl happens after the vmcs has already been loaded on the
> > current cpu.
> >
> > This fixes `vmctl send` on Intel hosts. (A fix for `vmctl receive` comes
> > next.)
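> >
> > (To reproduce on an Intel MP host, the usual send/receive round
> > trip should do; the VM name and path here are made up:
> >
> >         # vmctl send myvm > /tmp/myvm.state
> >         # vmctl receive myvm < /tmp/myvm.state
> >
> > Without the diff the send side errors out because the vmreads
> > described below fail.)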
> >
> > Currently, `vmctl send` tries to serialize the vcpu registers as part of
> > serializing the vm state. On an MP machine, it's highly probable that
> > the vmread instructions will fail as they'll be executed on a cpu that
> > doesn't have the vmcs loaded.
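> >
> > For context, the helper this diff leans on, vcpu_reload_vmcs_vmx,
> > is what makes the vcpu's vmcs "current" on the executing cpu so
> > that vmread can work at all. Roughly (a simplified sketch, not the
> > literal source):
> >
> >         ci = curcpu();
> >         if (vcpu->vc_last_pcpu != NULL && vcpu->vc_last_pcpu != ci) {
> >                 /* vmcs is active on another cpu; clear it there first */
> >                 vmm_remote_vmclear(vcpu->vc_last_pcpu, vcpu);
> >                 atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED);
> >         }
> >         if (vmptrld(&vcpu->vc_control_pa))
> >                 return (EINVAL);        /* subsequent vmreads would fail */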
> >
> > While here, I noticed that vcpu_writeregs_vmx doesn't set the
> > vcpu's vmcs state variable to VMCS_CLEARED after running vmclear.
> > This can cause VM-entry to fail, since vmm uses that state to
> > decide which of the two Intel entry instructions to issue
> > (vmlaunch or vmresume).
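> >
> > (For reference, the run path picks the entry instruction roughly
> > like this; paraphrased, not the exact source:
> >
> >         /* vmresume if this vmcs was already launched on this cpu,
> >          * vmlaunch otherwise */
> >         resume = (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED);
> >
> > so a vmcs that was vmclear'd but left marked VMCS_LAUNCHED ends up
> > on the vmresume path and the entry fails.)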
> >
> > ok?
> >
> > -dv
> >
> > Index: sys/arch/amd64/amd64/vmm.c
> > ===================================================================
> > RCS file: /opt/cvs/src/sys/arch/amd64/amd64/vmm.c,v
> > retrieving revision 1.308
> > diff -u -p -r1.308 vmm.c
> > --- sys/arch/amd64/amd64/vmm.c      4 May 2022 02:24:26 -0000       1.308
> > +++ sys/arch/amd64/amd64/vmm.c      8 May 2022 18:37:42 -0000
> > @@ -140,7 +140,7 @@ int vm_rwregs(struct vm_rwregs_params *,
> >  int vm_mprotect_ept(struct vm_mprotect_ept_params *);
> >  int vm_rwvmparams(struct vm_rwvmparams_params *, int);
> >  int vm_find(uint32_t, struct vm **);
> > -int vcpu_readregs_vmx(struct vcpu *, uint64_t, struct vcpu_reg_state *);
> > +int vcpu_readregs_vmx(struct vcpu *, uint64_t, int, struct vcpu_reg_state *);
> >  int vcpu_readregs_svm(struct vcpu *, uint64_t, struct vcpu_reg_state *);
> >  int vcpu_writeregs_vmx(struct vcpu *, uint64_t, int, struct vcpu_reg_state *);
> >  int vcpu_writeregs_svm(struct vcpu *, uint64_t, struct vcpu_reg_state *);
> > @@ -978,7 +978,7 @@ vm_rwregs(struct vm_rwregs_params *vrwp,
> >     if (vmm_softc->mode == VMM_MODE_VMX ||
> >         vmm_softc->mode == VMM_MODE_EPT)
> >             ret = (dir == 0) ?
> > -               vcpu_readregs_vmx(vcpu, vrwp->vrwp_mask, vrs) :
> > +               vcpu_readregs_vmx(vcpu, vrwp->vrwp_mask, 1, vrs) :
> >                 vcpu_writeregs_vmx(vcpu, vrwp->vrwp_mask, 1, vrs);
> >     else if (vmm_softc->mode == VMM_MODE_SVM ||
> >         vmm_softc->mode == VMM_MODE_RVI)
> > @@ -1986,6 +1986,7 @@ vcpu_reload_vmcs_vmx(struct vcpu *vcpu)
> >   * Parameters:
> >   *  vcpu: the vcpu to read register values from
> >   *  regmask: the types of registers to read
> > + *  loadvmcs: bit to indicate whether the VMCS has to be loaded first
> >   *  vrs: output parameter where register values are stored
> >   *
> >   * Return values:
> > @@ -1993,7 +1994,7 @@ vcpu_reload_vmcs_vmx(struct vcpu *vcpu)
> >   *  EINVAL: an error reading registers occurred
> >   */
> >  int
> > -vcpu_readregs_vmx(struct vcpu *vcpu, uint64_t regmask,
> > +vcpu_readregs_vmx(struct vcpu *vcpu, uint64_t regmask, int loadvmcs,
> >      struct vcpu_reg_state *vrs)
> >  {
> >     int i, ret = 0;
> > @@ -2005,6 +2006,11 @@ vcpu_readregs_vmx(struct vcpu *vcpu, uin
> >     struct vcpu_segment_info *sregs = vrs->vrs_sregs;
> >     struct vmx_msr_store *msr_store;
> >
> > +   if (loadvmcs) {
> > +           if (vcpu_reload_vmcs_vmx(vcpu))
> > +                   return (EINVAL);
> > +   }
> > +
> >  #ifdef VMM_DEBUG
> >     /* VMCS should be loaded... */
> >     paddr_t pa = 0ULL;
> > @@ -2393,6 +2399,7 @@ out:
> >     if (loadvmcs) {
> >             if (vmclear(&vcpu->vc_control_pa))
> >                     ret = EINVAL;
> > +           atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED);
> >     }
> >     return (ret);
> >  }
> > @@ -4631,7 +4638,7 @@ vmm_translate_gva(struct vcpu *vcpu, uin
> >
> >     if (vmm_softc->mode == VMM_MODE_EPT ||
> >         vmm_softc->mode == VMM_MODE_VMX) {
> > -           if (vcpu_readregs_vmx(vcpu, VM_RWREGS_ALL, &vrs))
> > +           if (vcpu_readregs_vmx(vcpu, VM_RWREGS_ALL, 1, &vrs))
> >                     return (EINVAL);
> >     } else if (vmm_softc->mode == VMM_MODE_RVI ||
> >         vmm_softc->mode == VMM_MODE_SVM) {
> > @@ -5111,7 +5118,7 @@ vcpu_run_vmx(struct vcpu *vcpu, struct v
> >     vcpu->vc_last_pcpu = curcpu();
> >
> >     /* Copy the VCPU register state to the exit structure */
> > -   if (vcpu_readregs_vmx(vcpu, VM_RWREGS_ALL, &vcpu->vc_exit.vrs))
> > +   if (vcpu_readregs_vmx(vcpu, VM_RWREGS_ALL, 0, &vcpu->vc_exit.vrs))
> >             ret = EINVAL;
> >     vcpu->vc_exit.cpl = vmm_get_guest_cpu_cpl(vcpu);
>
>
> --
> -Dave Voutila
>
