Re: [Xen-devel] [PATCH RFC v3] x86/domctl: Don't pause the whole domain if only getting vcpu state

2018-04-24 Thread Jan Beulich
>>> On 23.02.18 at 16:32, Alexandru Isaila wrote:
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -349,20 +349,33 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
>  return ret;
>  }
>  
> -static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
> +void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
> +{
> +ctxt->caps = v->arch.vmce.mcg_cap;
> +ctxt->mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
> +ctxt->mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
> +ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
> +}
> +
> +static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h, 
> unsigned int instance)
>  {
>  struct vcpu *v;
>  int err = 0;
>  
> -for_each_vcpu ( d, v )
> +if( instance < d->max_vcpus )
> +{
> +struct hvm_vmce_vcpu ctxt;
> +
> +v = d->vcpu[instance];
> +vmce_save_vcpu_ctxt_one(v, &ctxt);

Please add a NULL check for v between these two lines, like we do everywhere
else when accessing d->vcpu[].

> +err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
> +}
> +else for_each_vcpu ( d, v )
>  {
> -struct hvm_vmce_vcpu ctxt = {
> -.caps = v->arch.vmce.mcg_cap,
> -.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
> -.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
> -.mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl,
> -};
> +struct hvm_vmce_vcpu ctxt;
>  
> +vmce_save_vcpu_ctxt_one(v, &ctxt);
> +err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
>  if ( err )
>  break;

Note the recurring pattern throughout the patch: 

if ( instance < d->max_vcpus )
{
...
xyz_save_one();
}
else for_each_vcpu ( d, v )
{
...
xyz_save_one();
}

I think you want to move this into the caller, such that each specific handler
only deals with a single instance. I also think you want to split the patch,
introducing the save_one() functions one at a time (used by their save()
counterparts), and only then dropping the save() ones in favor of doing the
loop in the caller. The final patch would then add the single instance case
you're after.

Also please pay attention to coding style.

Jan



___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH RFC v3] x86/domctl: Don't pause the whole domain if only getting vcpu state

2018-02-23 Thread Alexandru Isaila
This patch adds the hvm_save_one_cpu_ctxt() function.
It optimizes by only pausing the vcpu on all HVMSR_PER_VCPU save
callbacks where only data for one VCPU is required.

Signed-off-by: Alexandru Isaila 

---
Changes since V2:
- Added functions for all the save_*_one cases
- Added unpause to the error case in the hvm_save_one().
---
 tools/tests/vhpet/emul.h   |   3 +-
 tools/tests/vhpet/main.c   |   2 +-
 xen/arch/x86/cpu/mcheck/vmce.c |  29 +++-
 xen/arch/x86/domctl.c  |   2 -
 xen/arch/x86/hvm/hpet.c|   2 +-
 xen/arch/x86/hvm/hvm.c | 329 +
 xen/arch/x86/hvm/i8254.c   |   2 +-
 xen/arch/x86/hvm/irq.c |   6 +-
 xen/arch/x86/hvm/mtrr.c|  58 +---
 xen/arch/x86/hvm/pmtimer.c |   2 +-
 xen/arch/x86/hvm/rtc.c |   2 +-
 xen/arch/x86/hvm/save.c|  78 +++---
 xen/arch/x86/hvm/vioapic.c |   2 +-
 xen/arch/x86/hvm/viridian.c|  30 +++-
 xen/arch/x86/hvm/vlapic.c  |  23 ++-
 xen/arch/x86/hvm/vpic.c|   2 +-
 xen/include/asm-x86/hvm/hvm.h  |   2 +
 xen/include/asm-x86/hvm/save.h |   5 +-
 18 files changed, 381 insertions(+), 198 deletions(-)

diff --git a/tools/tests/vhpet/emul.h b/tools/tests/vhpet/emul.h
index 383acff..99d5bbd 100644
--- a/tools/tests/vhpet/emul.h
+++ b/tools/tests/vhpet/emul.h
@@ -296,7 +296,8 @@ struct hvm_hw_hpet
 };
 
 typedef int (*hvm_save_handler)(struct domain *d,
-hvm_domain_context_t *h);
+hvm_domain_context_t *h,
+unsigned int instance);
 typedef int (*hvm_load_handler)(struct domain *d,
 hvm_domain_context_t *h);
 
diff --git a/tools/tests/vhpet/main.c b/tools/tests/vhpet/main.c
index 6fe65ea..3d8e7f5 100644
--- a/tools/tests/vhpet/main.c
+++ b/tools/tests/vhpet/main.c
@@ -177,7 +177,7 @@ void __init hvm_register_savevm(uint16_t typecode,
 
 int do_save(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
 {
-return hvm_sr_handlers[typecode].save(d, h);
+return hvm_sr_handlers[typecode].save(d, h, d->max_vcpus);
 }
 
 int do_load(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index e07cd2f..98b12e0 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,20 +349,33 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
 return ret;
 }
 
-static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
+{
+ctxt->caps = v->arch.vmce.mcg_cap;
+ctxt->mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+ctxt->mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
+ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
+}
+
+static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h, 
unsigned int instance)
 {
 struct vcpu *v;
 int err = 0;
 
-for_each_vcpu ( d, v )
+if( instance < d->max_vcpus )
+{
+struct hvm_vmce_vcpu ctxt;
+
+v = d->vcpu[instance];
> +vmce_save_vcpu_ctxt_one(v, &ctxt);
+
> +err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
+}
+else for_each_vcpu ( d, v )
 {
-struct hvm_vmce_vcpu ctxt = {
-.caps = v->arch.vmce.mcg_cap,
-.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
-.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
-.mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl,
-};
+struct hvm_vmce_vcpu ctxt;
 
> +vmce_save_vcpu_ctxt_one(v, &ctxt);
>  err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
 if ( err )
 break;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 540ba08..d3c4e14 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -624,12 +624,10 @@ long arch_do_domctl(
  !is_hvm_domain(d) )
 break;
 
-domain_pause(d);
 ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
domctl->u.hvmcontext_partial.instance,
domctl->u.hvmcontext_partial.buffer,
> &domctl->u.hvmcontext_partial.bufsz);
-domain_unpause(d);
 
 if ( !ret )
 copyback = true;
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 3ea895a..56f4691 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -509,7 +509,7 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
 };
 
 
-static int hpet_save(struct domain *d, hvm_domain_context_t *h)
+static int hpet_save(struct domain *d, hvm_domain_context_t *h, unsigned int 
instance)
 {
 HPETState *hp = domain_vhpet(d);
 struct vcpu *v = pt_global_vcpu_target(d);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 205b4cb..c7f38eb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++