[Xen-devel] Ping: [PATCH RFC v2] x86/domctl: Don't pause the whole domain if only getting vcpu state

2018-02-19 Thread Alexandru Stefan ISAILA
Ping?
> On Fri, 2017-10-06 at 13:02 +0300, Alexandru Isaila wrote:
> >
> > This patch adds the hvm_save_one_cpu_ctxt() function.
> > It optimizes the HVMSR_PER_VCPU save callbacks so that, when data
> > for only one vcpu is required, just that vcpu has to be paused
> > instead of the whole domain.
> >
> > Signed-off-by: Alexandru Isaila 
> >
> > ---
> > Changes since V1:
> > - Integrated the vcpu check into all the save callbacks
> > ---
> >  tools/tests/vhpet/emul.h   |   3 +-
> >  tools/tests/vhpet/main.c   |   2 +-
> >  xen/arch/x86/cpu/mcheck/vmce.c |  16 ++-
> >  xen/arch/x86/domctl.c  |   2 -
> >  xen/arch/x86/hvm/hpet.c|   2 +-
> >  xen/arch/x86/hvm/hvm.c | 280 ++-------
> >  xen/arch/x86/hvm/i8254.c   |   2 +-
> >  xen/arch/x86/hvm/irq.c |   6 +-
> >  xen/arch/x86/hvm/mtrr.c|  32 -
> >  xen/arch/x86/hvm/pmtimer.c |   2 +-
> >  xen/arch/x86/hvm/rtc.c |   2 +-
> >  xen/arch/x86/hvm/save.c|  71 ---
> >  xen/arch/x86/hvm/vioapic.c |   2 +-
> >  xen/arch/x86/hvm/viridian.c|  17 ++-
> >  xen/arch/x86/hvm/vlapic.c  |  23 +++-
> >  xen/arch/x86/hvm/vpic.c|   2 +-
> >  xen/include/asm-x86/hvm/hvm.h  |   2 +
> >  xen/include/asm-x86/hvm/save.h |   5 +-
> >  18 files changed, 324 insertions(+), 147 deletions(-)
> >
> > diff --git a/tools/tests/vhpet/emul.h b/tools/tests/vhpet/emul.h
> > index 383acff..99d5bbd 100644
> > --- a/tools/tests/vhpet/emul.h
> > +++ b/tools/tests/vhpet/emul.h
> > @@ -296,7 +296,8 @@ struct hvm_hw_hpet
> >  };
> >
> >  typedef int (*hvm_save_handler)(struct domain *d,
> > -hvm_domain_context_t *h);
> > +hvm_domain_context_t *h,
> > +unsigned int instance);
> >  typedef int (*hvm_load_handler)(struct domain *d,
> >  hvm_domain_context_t *h);
> >
> > diff --git a/tools/tests/vhpet/main.c b/tools/tests/vhpet/main.c
> > index 6fe65ea..3d8e7f5 100644
> > --- a/tools/tests/vhpet/main.c
> > +++ b/tools/tests/vhpet/main.c
> > @@ -177,7 +177,7 @@ void __init hvm_register_savevm(uint16_t typecode,
> >
> >  int do_save(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
> >  {
> > -return hvm_sr_handlers[typecode].save(d, h);
> > +return hvm_sr_handlers[typecode].save(d, h, d->max_vcpus);
> >  }
> >
> >  int do_load(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
> > diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
> > index e07cd2f..a1a12a5 100644
> > --- a/xen/arch/x86/cpu/mcheck/vmce.c
> > +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> > @@ -349,12 +349,24 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
> >  return ret;
> >  }
> >
> > -static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
> > +static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
> >  {
> >  struct vcpu *v;
> >  int err = 0;
> >
> > -for_each_vcpu ( d, v )
> > +if( instance < d->max_vcpus )
> > +{
> > +struct hvm_vmce_vcpu ctxt;
> > +
> > +v = d->vcpu[instance];
> > +ctxt.caps = v->arch.vmce.mcg_cap;
> > +ctxt.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
> > +ctxt.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
> > +ctxt.mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
> > +
> > +err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
> > +}
> > +else for_each_vcpu ( d, v )
> >  {
> >  struct hvm_vmce_vcpu ctxt = {
> >  .caps = v->arch.vmce.mcg_cap,
> > diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> > index 540ba08..d3c4e14 100644
> > --- a/xen/arch/x86/domctl.c
> > +++ b/xen/arch/x86/domctl.c
> > @@ -624,12 +624,10 @@ long arch_do_domctl(
> >   !is_hvm_domain(d) )
> >  break;
> >
> > -domain_pause(d);
> >  ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
> > domctl->u.hvmcontext_partial.instance,
> > domctl->u.hvmcontext_partial.buffer,
> > &domctl->u.hvmcontext_partial.bufsz);
> > -domain_unpause(d);
> >
> >  if ( !ret )
> >  copyback = true;
> > diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
> > index 3ea895a..56f4691 100644
> > --- a/xen/arch/x86/hvm/hpet.c
> > +++ b/xen/arch/x86/hvm/hpet.c
> > @@ -509,7 +509,7 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
> >  };
> >
> >
> > -static int hpet_save(struct domain *d, hvm_domain_context_t *h)
> > +static int hpet_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
> >  {
> >  HPETState *hp = domain_vhpet(d);
> >  struct vcpu *v = pt_global_vcpu_target(d);
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index 205b4cb..140f2c3 100644
> > --- 
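
To make the reworked callback signature easier to follow, the following is a
minimal standalone C model of the pattern the patch introduces: every save
handler gains an "instance" argument, saves a single vcpu's record when
instance names a valid vcpu, and falls back to iterating over all vcpus when
instance == max_vcpus (which is what do_save() passes for a full save). All
names below (demo_domain, demo_vcpu, demo_save_ctxt) are invented for
illustration and are not the Xen structures.

#include <stdio.h>

#define DEMO_MAX_VCPUS 4

struct demo_vcpu {
    unsigned int id;
    unsigned long state;        /* stands in for per-vcpu register state */
};

struct demo_domain {
    unsigned int max_vcpus;
    struct demo_vcpu vcpu[DEMO_MAX_VCPUS];
};

/* Mirrors the new handler shape: instance < max_vcpus selects one vcpu,
 * instance == max_vcpus means "save them all". */
static int demo_save_ctxt(struct demo_domain *d, unsigned int instance)
{
    if ( instance < d->max_vcpus )
    {
        struct demo_vcpu *v = &d->vcpu[instance];

        printf("save vcpu %u (state %lu)\n", v->id, v->state);
        return 0;
    }

    for ( unsigned int i = 0; i < d->max_vcpus; i++ )
        printf("save vcpu %u (state %lu)\n", d->vcpu[i].id, d->vcpu[i].state);

    return 0;
}

int main(void)
{
    struct demo_domain d = { .max_vcpus = DEMO_MAX_VCPUS };

    for ( unsigned int i = 0; i < d.max_vcpus; i++ )
        d.vcpu[i] = (struct demo_vcpu){ .id = i, .state = 100 + i };

    demo_save_ctxt(&d, 2);            /* single-instance path */
    demo_save_ctxt(&d, d.max_vcpus);  /* whole-domain path */
    return 0;
}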

[Xen-devel] Ping: [PATCH RFC v2] x86/domctl: Don't pause the whole domain if only getting vcpu state

2018-01-05 Thread Alexandru Stefan ISAILA
Any thoughts appreciated.

On Fri, 2017-10-06 at 13:02 +0300, Alexandru Isaila wrote:
> This patch adds the hvm_save_one_cpu_ctxt() function.
> It optimizes the HVMSR_PER_VCPU save callbacks so that, when data
> for only one vcpu is required, just that vcpu has to be paused
> instead of the whole domain.
>
> Signed-off-by: Alexandru Isaila 
>
> ---
> Changes since V1:
> - Integrated the vcpu check into all the save callbacks
> ---
>  tools/tests/vhpet/emul.h   |   3 +-
>  tools/tests/vhpet/main.c   |   2 +-
>  xen/arch/x86/cpu/mcheck/vmce.c |  16 ++-
>  xen/arch/x86/domctl.c  |   2 -
>  xen/arch/x86/hvm/hpet.c|   2 +-
>  xen/arch/x86/hvm/hvm.c | 280 ++---
>  xen/arch/x86/hvm/i8254.c   |   2 +-
>  xen/arch/x86/hvm/irq.c |   6 +-
>  xen/arch/x86/hvm/mtrr.c|  32 -
>  xen/arch/x86/hvm/pmtimer.c |   2 +-
>  xen/arch/x86/hvm/rtc.c |   2 +-
>  xen/arch/x86/hvm/save.c|  71 ---
>  xen/arch/x86/hvm/vioapic.c |   2 +-
>  xen/arch/x86/hvm/viridian.c|  17 ++-
>  xen/arch/x86/hvm/vlapic.c  |  23 +++-
>  xen/arch/x86/hvm/vpic.c|   2 +-
>  xen/include/asm-x86/hvm/hvm.h  |   2 +
>  xen/include/asm-x86/hvm/save.h |   5 +-
>  18 files changed, 324 insertions(+), 147 deletions(-)
>
> diff --git a/tools/tests/vhpet/emul.h b/tools/tests/vhpet/emul.h
> index 383acff..99d5bbd 100644
> --- a/tools/tests/vhpet/emul.h
> +++ b/tools/tests/vhpet/emul.h
> @@ -296,7 +296,8 @@ struct hvm_hw_hpet
>  };
>
>  typedef int (*hvm_save_handler)(struct domain *d,
> -hvm_domain_context_t *h);
> +hvm_domain_context_t *h,
> +unsigned int instance);
>  typedef int (*hvm_load_handler)(struct domain *d,
>  hvm_domain_context_t *h);
>
> diff --git a/tools/tests/vhpet/main.c b/tools/tests/vhpet/main.c
> index 6fe65ea..3d8e7f5 100644
> --- a/tools/tests/vhpet/main.c
> +++ b/tools/tests/vhpet/main.c
> @@ -177,7 +177,7 @@ void __init hvm_register_savevm(uint16_t typecode,
>
>  int do_save(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
>  {
> -return hvm_sr_handlers[typecode].save(d, h);
> +return hvm_sr_handlers[typecode].save(d, h, d->max_vcpus);
>  }
>
>  int do_load(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
> index e07cd2f..a1a12a5 100644
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -349,12 +349,24 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
>  return ret;
>  }
>
> -static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
> +static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
>  {
>  struct vcpu *v;
>  int err = 0;
>
> -for_each_vcpu ( d, v )
> +if( instance < d->max_vcpus )
> +{
> +struct hvm_vmce_vcpu ctxt;
> +
> +v = d->vcpu[instance];
> +ctxt.caps = v->arch.vmce.mcg_cap;
> +ctxt.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
> +ctxt.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
> +ctxt.mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
> +
> +err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
> +}
> +else for_each_vcpu ( d, v )
>  {
>  struct hvm_vmce_vcpu ctxt = {
>  .caps = v->arch.vmce.mcg_cap,
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 540ba08..d3c4e14 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -624,12 +624,10 @@ long arch_do_domctl(
>   !is_hvm_domain(d) )
>  break;
>
> -domain_pause(d);
>  ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
> domctl->u.hvmcontext_partial.instance,
> domctl->u.hvmcontext_partial.buffer,
> &domctl->u.hvmcontext_partial.bufsz);
> -domain_unpause(d);
>
>  if ( !ret )
>  copyback = true;
> diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
> index 3ea895a..56f4691 100644
> --- a/xen/arch/x86/hvm/hpet.c
> +++ b/xen/arch/x86/hvm/hpet.c
> @@ -509,7 +509,7 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
>  };
>
>
> -static int hpet_save(struct domain *d, hvm_domain_context_t *h)
> +static int hpet_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
>  {
>  HPETState *hp = domain_vhpet(d);
>  struct vcpu *v = pt_global_vcpu_target(d);
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 205b4cb..140f2c3 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -728,13 +728,19 @@ void hvm_domain_destroy(struct domain *d)
>  }
>  }
>
> -static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
> +static int hvm_save_tsc_adjust(struct
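
The quoted diff is cut off above before reaching the xen/arch/x86/hvm/save.c
changes, so the actual hvm_save_one() rework is not visible here. Going only
by the commit message and by the removal of domain_pause()/domain_unpause()
from the domctl handler, the intended shape is presumably something like the
fragment below. This is an illustrative sketch, not the patch itself: the
surrounding declarations, error handling and the handler-table details of
save.c are omitted, and 'h' and 'rv' are assumed to be an already prepared
hvm_domain_context_t buffer and an int result variable.

    /* Inside hvm_save_one(), after typecode/instance validation (sketch). */
    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
         instance < d->max_vcpus )
    {
        struct vcpu *v = d->vcpu[instance];

        /* Pause only the vcpu whose record is being read... */
        vcpu_pause(v);
        rv = hvm_sr_handlers[typecode].save(d, h, instance);
        vcpu_unpause(v);
    }
    else
    {
        /* ...and keep the whole-domain pause for per-domain records. */
        domain_pause(d);
        rv = hvm_sr_handlers[typecode].save(d, h, d->max_vcpus);
        domain_unpause(d);
    }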