ping

Dave Voutila <d...@sisu.io> writes:

> This tucks all the spinout paranoia behind `#ifdef MP_LOCKDEBUG` and
> uses the same approach as amd64's pmap TLB shootdown code.
>
> Part of me wants to remove this altogether, but I'm not sure it's
> outlived its usefulness quite yet.
>
> Three areas that busy-wait on IPIs are modified:
>
> 1. vmm_start - performs an IPI to enable vmm on all CPUs
> 2. vmm_stop - performs an IPI to disable vmm on all CPUs
> 3. vmx_remote_vmclear - performs an IPI to vmclear a remote CPU (only
>    pertinent to Intel hosts)
>
> (3) is the most likely to spin out; before the spinout was bumped to
> the current value (based on __mp_lock_spinout), we had reports from
> users hitting it on slower/older MP hardware.
>
> For vmm_{start,stop}, I moved the current CPU's start/stop routine to
> before the IPI broadcast: if we're going to fail to enable or disable
> vmm, we should fail fast, and there's no need to broadcast the IPI.
> This simplifies the code paths and removes a local variable.
>
> All three migrate to infinite busy waits and only have a spinout if
> built with MP_LOCKDEBUG. On a spinout, we enter ddb.
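>
> For reference, all three waits now share the shape below (a condensed
> sketch of the diff; shown with the vmm_start condition, vmm_stop and
> vmx_remote_vmclear only differ in the flag or field being polled):
>
> #ifdef MP_LOCKDEBUG
> 	nticks = __mp_lock_spinout;
> #endif /* MP_LOCKDEBUG */
> 	while (!(ci->ci_flags & CPUF_VMM)) {
> 		CPU_BUSY_CYCLE();
> #ifdef MP_LOCKDEBUG
> 		if (--nticks <= 0) {
> 			/* MP_LOCKDEBUG builds report the stuck wait and drop to ddb. */
> 			db_printf("%s: spun out\n", __func__);
> 			db_enter();
> 			nticks = __mp_lock_spinout;
> 		}
> #endif /* MP_LOCKDEBUG */
> 	}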
>
> Compiled on amd64 GENERIC, GENERIC.MP, and GENERIC.MP with
> MP_LOCKDEBUG. (This time I won't break GENERIC :)
>
> OK?
>
> -dv
>
> Index: sys/arch/amd64/amd64/vmm.c
> ===================================================================
> RCS file: /opt/cvs/src/sys/arch/amd64/amd64/vmm.c,v
> retrieving revision 1.305
> diff -u -p -r1.305 vmm.c
> --- sys/arch/amd64/amd64/vmm.c        28 Mar 2022 06:28:47 -0000      1.305
> +++ sys/arch/amd64/amd64/vmm.c        16 Apr 2022 18:49:01 -0000
> @@ -43,6 +43,11 @@
>  #include <dev/isa/isareg.h>
>  #include <dev/pv/pvreg.h>
>
> +#ifdef MP_LOCKDEBUG
> +#include <ddb/db_output.h>
> +extern int __mp_lock_spinout;
> +#endif /* MP_LOCKDEBUG */
> +
>  /* #define VMM_DEBUG */
>
>  void *l1tf_flush_region;
> @@ -1328,17 +1333,26 @@ int
>  vmm_start(void)
>  {
>       struct cpu_info *self = curcpu();
> -     int ret = 0;
>  #ifdef MULTIPROCESSOR
>       struct cpu_info *ci;
>       CPU_INFO_ITERATOR cii;
> -     int i;
> -#endif
> +#ifdef MP_LOCKDEBUG
> +     int nticks;
> +#endif /* MP_LOCKDEBUG */
> +#endif /* MULTIPROCESSOR */
>
>       /* VMM is already running */
>       if (self->ci_flags & CPUF_VMM)
>               return (0);
>
> +     /* Start VMM on this CPU */
> +     start_vmm_on_cpu(self);
> +     if (!(self->ci_flags & CPUF_VMM)) {
> +             printf("%s: failed to enter VMM mode\n",
> +                     self->ci_dev->dv_xname);
> +             return (EIO);
> +     }
> +
>  #ifdef MULTIPROCESSOR
>       /* Broadcast start VMM IPI */
>       x86_broadcast_ipi(X86_IPI_START_VMM);
> @@ -1346,25 +1360,23 @@ vmm_start(void)
>       CPU_INFO_FOREACH(cii, ci) {
>               if (ci == self)
>                       continue;
> -             for (i = 100000; (!(ci->ci_flags & CPUF_VMM)) && i>0;i--)
> -                     delay(10);
> -             if (!(ci->ci_flags & CPUF_VMM)) {
> -                     printf("%s: failed to enter VMM mode\n",
> -                             ci->ci_dev->dv_xname);
> -                     ret = EIO;
> +#ifdef MP_LOCKDEBUG
> +             nticks = __mp_lock_spinout;
> +#endif /* MP_LOCKDEBUG */
> +             while (!(ci->ci_flags & CPUF_VMM)) {
> +                     CPU_BUSY_CYCLE();
> +#ifdef MP_LOCKDEBUG
> +                     if (--nticks <= 0) {
> +                             db_printf("%s: spun out\n", __func__);
> +                             db_enter();
> +                             nticks = __mp_lock_spinout;
> +                     }
> +#endif /* MP_LOCKDEBUG */
>               }
>       }
>  #endif /* MULTIPROCESSOR */
>
> -     /* Start VMM on this CPU */
> -     start_vmm_on_cpu(self);
> -     if (!(self->ci_flags & CPUF_VMM)) {
> -             printf("%s: failed to enter VMM mode\n",
> -                     self->ci_dev->dv_xname);
> -             ret = EIO;
> -     }
> -
> -     return (ret);
> +     return (0);
>  }
>
>  /*
> @@ -1376,17 +1388,26 @@ int
>  vmm_stop(void)
>  {
>       struct cpu_info *self = curcpu();
> -     int ret = 0;
>  #ifdef MULTIPROCESSOR
>       struct cpu_info *ci;
>       CPU_INFO_ITERATOR cii;
> -     int i;
> -#endif
> +#ifdef MP_LOCKDEBUG
> +     int nticks;
> +#endif /* MP_LOCKDEBUG */
> +#endif /* MULTIPROCESSOR */
>
>       /* VMM is not running */
>       if (!(self->ci_flags & CPUF_VMM))
>               return (0);
>
> +     /* Stop VMM on this CPU */
> +     stop_vmm_on_cpu(self);
> +     if (self->ci_flags & CPUF_VMM) {
> +             printf("%s: failed to exit VMM mode\n",
> +                     self->ci_dev->dv_xname);
> +             return (EIO);
> +     }
> +
>  #ifdef MULTIPROCESSOR
>       /* Stop VMM on other CPUs */
>       x86_broadcast_ipi(X86_IPI_STOP_VMM);
> @@ -1394,25 +1415,23 @@ vmm_stop(void)
>       CPU_INFO_FOREACH(cii, ci) {
>               if (ci == self)
>                       continue;
> -             for (i = 100000; (ci->ci_flags & CPUF_VMM) && i>0 ;i--)
> -                     delay(10);
> -             if (ci->ci_flags & CPUF_VMM) {
> -                     printf("%s: failed to exit VMM mode\n",
> -                             ci->ci_dev->dv_xname);
> -                     ret = EIO;
> +#ifdef MP_LOCKDEBUG
> +             nticks = __mp_lock_spinout;
> +#endif /* MP_LOCKDEBUG */
> +             while (ci->ci_flags & CPUF_VMM) {
> +                     CPU_BUSY_CYCLE();
> +#ifdef MP_LOCKDEBUG
> +                     if (--nticks <= 0) {
> +                             db_printf("%s: spun out\n", __func__);
> +                             db_enter();
> +                             nticks = __mp_lock_spinout;
> +                     }
> +#endif /* MP_LOCKDEBUG */
>               }
>       }
>  #endif /* MULTIPROCESSOR */
>
> -     /* Stop VMM on this CPU */
> -     stop_vmm_on_cpu(self);
> -     if (self->ci_flags & CPUF_VMM) {
> -             printf("%s: failed to exit VMM mode\n",
> -                     self->ci_dev->dv_xname);
> -             ret = EIO;
> -     }
> -
> -     return (ret);
> +     return (0);
>  }
>
>  /*
> @@ -1536,7 +1555,9 @@ vmclear_on_cpu(struct cpu_info *ci)
>  static int
>  vmx_remote_vmclear(struct cpu_info *ci, struct vcpu *vcpu)
>  {
> -     int ret = 0, nticks = 200000000;
> +#ifdef MP_LOCKDEBUG
> +     int nticks = __mp_lock_spinout;
> +#endif /* MP_LOCKDEBUG */
>
>       rw_enter_write(&ci->ci_vmcs_lock);
>       atomic_swap_ulong(&ci->ci_vmcs_pa, vcpu->vc_control_pa);
> @@ -1544,16 +1565,18 @@ vmx_remote_vmclear(struct cpu_info *ci,
>
>       while (ci->ci_vmcs_pa != VMX_VMCS_PA_CLEAR) {
>               CPU_BUSY_CYCLE();
> +#ifdef MP_LOCKDEBUG
>               if (--nticks <= 0) {
> -                     printf("%s: spun out\n", __func__);
> -                     ret = 1;
> -                     break;
> +                     db_printf("%s: spun out\n", __func__);
> +                     db_enter();
> +                     nticks = __mp_lock_spinout;
>               }
> +#endif /* MP_LOCKDEBUG */
>       }
>       atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED);
>       rw_exit_write(&ci->ci_vmcs_lock);
>
> -     return (ret);
> +     return (0);
>  }
>  #endif /* MULTIPROCESSOR */


--
-Dave Voutila
