Re: [Xen-devel] [PATCH v12 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one func

2018-07-17 Thread Isaila Alexandru
On Tue, 2018-07-17 at 08:03 -0600, Jan Beulich wrote:
> >>> On 17.07.18 at 14:25,  wrote:
> > On Mon, 2018-07-16 at 15:29 +, Paul Durrant wrote:
> > > > From: Alexandru Isaila [mailto:aisa...@bitdefender.com]
> > > > Sent: 16 July 2018 15:55
> > > > --- a/xen/arch/x86/hvm/hvm.c
> > > > +++ b/xen/arch/x86/hvm/hvm.c
> > > > @@ -787,119 +787,129 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
> > > >  HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
> > > >                            hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
> > > > 
> > > > +static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
> > > > +{
> > > > +    struct segment_register seg;
> > > > +    struct hvm_hw_cpu ctxt;
> > > > +
> > > > +    memset(&ctxt, 0, sizeof(ctxt));
> > > Why not use an = {} initializer instead of the memset here like
> > > elsewhere?
> > I wanted to make as little change as possible and only added an
> > initializer where there was none.
> Trying to limit patch impact is certainly appreciated, but please take a
> look at your patch to see whether this would really have made much of a
> difference.
> 
I will change this in the next version, but I will wait for more
comments on the rest of the patches.

Regards, 
Alex
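
For readers following along, a minimal standalone sketch of the two forms under discussion; `hw_cpu_demo` is an illustrative stand-in, not the real `struct hvm_hw_cpu` layout:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for struct hvm_hw_cpu. */
    struct hw_cpu_demo {
        unsigned long rax, rip, rflags;
        unsigned int flags;
    };

    int main(void)
    {
        /* Form used in the patch: declare, then memset to zero. */
        struct hw_cpu_demo a;
        memset(&a, 0, sizeof(a));

        /* Form suggested in review: an empty initializer zeroes every
         * member at the point of declaration (a GNU C extension that,
         * per Paul's remark, is already used elsewhere in the tree). */
        struct hw_cpu_demo b = {};

        printf("a.rax=%lu b.rax=%lu\n", a.rax, b.rax); /* both print 0 */
        return 0;
    }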


Re: [Xen-devel] [PATCH v12 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one func

2018-07-17 Thread Jan Beulich
>>> On 17.07.18 at 14:25,  wrote:
> On Mon, 2018-07-16 at 15:29 +, Paul Durrant wrote:
>> > From: Alexandru Isaila [mailto:aisa...@bitdefender.com]
>> > Sent: 16 July 2018 15:55
>> > --- a/xen/arch/x86/hvm/hvm.c
>> > +++ b/xen/arch/x86/hvm/hvm.c
>> > @@ -787,119 +787,129 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
>> >  HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
>> >                            hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
>> > 
>> > +static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
>> > +{
>> > +    struct segment_register seg;
>> > +    struct hvm_hw_cpu ctxt;
>> > +
>> > +    memset(&ctxt, 0, sizeof(ctxt));
>> Why not use an = {} initializer instead of the memset here like
>> elsewhere?
> 
> I wanted to make as little change as possible and only added an
> initializer where there was none.

Trying to limit patch impact is certainly appreciated, but please take a
look at your patch to see whether this would really have made much
of a difference.

Jan
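
For context, the delta Jan is hinting at would presumably be as small as the following (a sketch, assuming v13 simply swaps the memset for the initializer):

    -    struct hvm_hw_cpu ctxt;
    -
    -    memset(&ctxt, 0, sizeof(ctxt));
    +    struct hvm_hw_cpu ctxt = {};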




Re: [Xen-devel] [PATCH v12 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one func

2018-07-17 Thread Isaila Alexandru
On Mon, 2018-07-16 at 15:29 +, Paul Durrant wrote:
> > -----Original Message-----
> > From: Alexandru Isaila [mailto:aisa...@bitdefender.com]
> > Sent: 16 July 2018 15:55
> > To: xen-de...@lists.xen.org
> > Cc: Ian Jackson ; Wei Liu ;
> > jbeul...@suse.com; Andrew Cooper ; Paul
> > Durrant ; Alexandru Isaila 
> > Subject: [PATCH v12 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one
> > func
> > 
> > This is used to save data from a single instance.
> > 
> > Signed-off-by: Alexandru Isaila 
> > 
> > ---
> > Changes since V11:
> > - hvm_save_cpu_ctxt() now returns err from
> >   hvm_save_cpu_ctxt_one().
> > ---
> >  xen/arch/x86/hvm/hvm.c | 216 +++++++++++++++++++++++++++++-----------------------------
> >  1 file changed, 113 insertions(+), 103 deletions(-)
> > 
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index dd88751..e20a25c 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -787,119 +787,129 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
> >  HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
> >                            hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
> > 
> > +static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
> > +{
> > +    struct segment_register seg;
> > +    struct hvm_hw_cpu ctxt;
> > +
> > +    memset(&ctxt, 0, sizeof(ctxt));
> Why not use an = {} initializer instead of the memset here like
> elsewhere?
> 
>   Paul

I wanted to make as little change as possible and only added an
initializer where there was none.

Alex 
> > 
> > +
> > +    /* Architecture-specific vmcs/vmcb bits */
> > +    hvm_funcs.save_cpu_ctxt(v, &ctxt);
> > +
> > +    ctxt.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
> > +
> > +    ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
> > +
> > +    hvm_get_segment_register(v, x86_seg_idtr, &seg);
> > +    ctxt.idtr_limit = seg.limit;
> > +    ctxt.idtr_base = seg.base;
> > +
> > +    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
> > +    ctxt.gdtr_limit = seg.limit;
> > +    ctxt.gdtr_base = seg.base;
> > +
> > +    hvm_get_segment_register(v, x86_seg_cs, &seg);
> > +    ctxt.cs_sel = seg.sel;
> > +    ctxt.cs_limit = seg.limit;
> > +    ctxt.cs_base = seg.base;
> > +    ctxt.cs_arbytes = seg.attr;
> > +
> > +    hvm_get_segment_register(v, x86_seg_ds, &seg);
> > +    ctxt.ds_sel = seg.sel;
> > +    ctxt.ds_limit = seg.limit;
> > +    ctxt.ds_base = seg.base;
> > +    ctxt.ds_arbytes = seg.attr;
> > +
> > +    hvm_get_segment_register(v, x86_seg_es, &seg);
> > +    ctxt.es_sel = seg.sel;
> > +    ctxt.es_limit = seg.limit;
> > +    ctxt.es_base = seg.base;
> > +    ctxt.es_arbytes = seg.attr;
> > +
> > +    hvm_get_segment_register(v, x86_seg_ss, &seg);
> > +    ctxt.ss_sel = seg.sel;
> > +    ctxt.ss_limit = seg.limit;
> > +    ctxt.ss_base = seg.base;
> > +    ctxt.ss_arbytes = seg.attr;
> > +
> > +    hvm_get_segment_register(v, x86_seg_fs, &seg);
> > +    ctxt.fs_sel = seg.sel;
> > +    ctxt.fs_limit = seg.limit;
> > +    ctxt.fs_base = seg.base;
> > +    ctxt.fs_arbytes = seg.attr;
> > +
> > +    hvm_get_segment_register(v, x86_seg_gs, &seg);
> > +    ctxt.gs_sel = seg.sel;
> > +    ctxt.gs_limit = seg.limit;
> > +    ctxt.gs_base = seg.base;
> > +    ctxt.gs_arbytes = seg.attr;
> > +
> > +    hvm_get_segment_register(v, x86_seg_tr, &seg);
> > +    ctxt.tr_sel = seg.sel;
> > +    ctxt.tr_limit = seg.limit;
> > +    ctxt.tr_base = seg.base;
> > +    ctxt.tr_arbytes = seg.attr;
> > +
> > +    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
> > +    ctxt.ldtr_sel = seg.sel;
> > +    ctxt.ldtr_limit = seg.limit;
> > +    ctxt.ldtr_base = seg.base;
> > +    ctxt.ldtr_arbytes = seg.attr;
> > +
> > +    if ( v->fpu_initialised )
> > +    {
> > +        memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
> > +        ctxt.flags = XEN_X86_FPU_INITIALISED;
> > +    }
> > +
> > +    ctxt.rax = v->arch.user_regs.rax;
> > +    ctxt.rbx = v->arch.user_regs.rbx;
> > +    ctxt.rcx = v->arch.user_regs.rcx;
> > +    ctxt.rdx = v->arch.user_regs.rdx;
> > +    ctxt.rbp = v->arch.user_regs.rbp;
> > +    ctxt.rsi = v->arch.user_regs.rsi;
> > +    ctxt.rdi = v->arch.user_regs.rdi;
> > +    ctxt.rsp = v->arch.user_regs.rsp;
> > +    ctxt.rip = v->arch.user_regs.rip;
> > +    ctxt.rflags = v->arch.user_regs.rflags;
> > +    ctxt.r8  = v->arch.user_regs.r8;
> > +    ctxt.r9  = v->arch.user_regs.r9;
> > +    ctxt.r10 = v->arch.user_regs.r10;
> > +    ctxt.r11 = v->arch.user_regs.r11;
> > +    ctxt.r12 = v->arch.user_regs.r12;
> > +    ctxt.r13 = v->arch.user_regs.r13;
> > +    ctxt.r14 = v->arch.user_regs.r14;
> > +    ctxt.r15 = v->arch.user_regs.r15;
> > +    ctxt.dr0 = v->arch.debugreg[0];
> > +    ctxt.dr1 = v->arch.debugreg[1];
> > +    ctxt.dr2 = v->arch.debugreg[2];
> > +    ctxt.dr3 = v->arch.debugreg[3];
> > +    ctxt.dr6 = v->arch.debugreg[6];
> > +    ctxt.dr7 = v->arch.debugreg[7];
> > +
> > +    return hvm_save_entry(CPU, v->vcpu_id, h, &ctxt);

Re: [Xen-devel] [PATCH v12 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one func

2018-07-16 Thread Paul Durrant
> -----Original Message-----
> From: Alexandru Isaila [mailto:aisa...@bitdefender.com]
> Sent: 16 July 2018 15:55
> To: xen-de...@lists.xen.org
> Cc: Ian Jackson ; Wei Liu ;
> jbeul...@suse.com; Andrew Cooper ; Paul
> Durrant ; Alexandru Isaila
> 
> Subject: [PATCH v12 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one
> func
> 
> This is used to save data from a single instance.
> 
> Signed-off-by: Alexandru Isaila 
> 
> ---
> Changes since V11:
>   - hvm_save_cpu_ctxt() now returns err from
> hvm_save_cpu_ctxt_one().
> ---
>  xen/arch/x86/hvm/hvm.c | 216 +++++++++++++++++++++++++++++-----------------------------
>  1 file changed, 113 insertions(+), 103 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index dd88751..e20a25c 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -787,119 +787,129 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
>  HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
>                            hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
> 
> +static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
> +{
> +    struct segment_register seg;
> +    struct hvm_hw_cpu ctxt;
> +
> +    memset(&ctxt, 0, sizeof(ctxt));

Why not use an = {} initializer instead of the memset here like elsewhere?

  Paul

> +
> +    /* Architecture-specific vmcs/vmcb bits */
> +    hvm_funcs.save_cpu_ctxt(v, &ctxt);
> +
> +    ctxt.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
> +
> +    ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
> +
> +    hvm_get_segment_register(v, x86_seg_idtr, &seg);
> +    ctxt.idtr_limit = seg.limit;
> +    ctxt.idtr_base = seg.base;
> +
> +    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
> +    ctxt.gdtr_limit = seg.limit;
> +    ctxt.gdtr_base = seg.base;
> +
> +    hvm_get_segment_register(v, x86_seg_cs, &seg);
> +    ctxt.cs_sel = seg.sel;
> +    ctxt.cs_limit = seg.limit;
> +    ctxt.cs_base = seg.base;
> +    ctxt.cs_arbytes = seg.attr;
> +
> +    hvm_get_segment_register(v, x86_seg_ds, &seg);
> +    ctxt.ds_sel = seg.sel;
> +    ctxt.ds_limit = seg.limit;
> +    ctxt.ds_base = seg.base;
> +    ctxt.ds_arbytes = seg.attr;
> +
> +    hvm_get_segment_register(v, x86_seg_es, &seg);
> +    ctxt.es_sel = seg.sel;
> +    ctxt.es_limit = seg.limit;
> +    ctxt.es_base = seg.base;
> +    ctxt.es_arbytes = seg.attr;
> +
> +    hvm_get_segment_register(v, x86_seg_ss, &seg);
> +    ctxt.ss_sel = seg.sel;
> +    ctxt.ss_limit = seg.limit;
> +    ctxt.ss_base = seg.base;
> +    ctxt.ss_arbytes = seg.attr;
> +
> +    hvm_get_segment_register(v, x86_seg_fs, &seg);
> +    ctxt.fs_sel = seg.sel;
> +    ctxt.fs_limit = seg.limit;
> +    ctxt.fs_base = seg.base;
> +    ctxt.fs_arbytes = seg.attr;
> +
> +    hvm_get_segment_register(v, x86_seg_gs, &seg);
> +    ctxt.gs_sel = seg.sel;
> +    ctxt.gs_limit = seg.limit;
> +    ctxt.gs_base = seg.base;
> +    ctxt.gs_arbytes = seg.attr;
> +
> +    hvm_get_segment_register(v, x86_seg_tr, &seg);
> +    ctxt.tr_sel = seg.sel;
> +    ctxt.tr_limit = seg.limit;
> +    ctxt.tr_base = seg.base;
> +    ctxt.tr_arbytes = seg.attr;
> +
> +    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
> +    ctxt.ldtr_sel = seg.sel;
> +    ctxt.ldtr_limit = seg.limit;
> +    ctxt.ldtr_base = seg.base;
> +    ctxt.ldtr_arbytes = seg.attr;
> +
> +    if ( v->fpu_initialised )
> +    {
> +        memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
> +        ctxt.flags = XEN_X86_FPU_INITIALISED;
> +    }
> +
> +    ctxt.rax = v->arch.user_regs.rax;
> +    ctxt.rbx = v->arch.user_regs.rbx;
> +    ctxt.rcx = v->arch.user_regs.rcx;
> +    ctxt.rdx = v->arch.user_regs.rdx;
> +    ctxt.rbp = v->arch.user_regs.rbp;
> +    ctxt.rsi = v->arch.user_regs.rsi;
> +    ctxt.rdi = v->arch.user_regs.rdi;
> +    ctxt.rsp = v->arch.user_regs.rsp;
> +    ctxt.rip = v->arch.user_regs.rip;
> +    ctxt.rflags = v->arch.user_regs.rflags;
> +    ctxt.r8  = v->arch.user_regs.r8;
> +    ctxt.r9  = v->arch.user_regs.r9;
> +    ctxt.r10 = v->arch.user_regs.r10;
> +    ctxt.r11 = v->arch.user_regs.r11;
> +    ctxt.r12 = v->arch.user_regs.r12;
> +    ctxt.r13 = v->arch.user_regs.r13;
> +    ctxt.r14 = v->arch.user_regs.r14;
> +    ctxt.r15 = v->arch.user_regs.r15;
> +    ctxt.dr0 = v->arch.debugreg[0];
> +    ctxt.dr1 = v->arch.debugreg[1];
> +    ctxt.dr2 = v->arch.debugreg[2];
> +    ctxt.dr3 = v->arch.debugreg[3];
> +    ctxt.dr6 = v->arch.debugreg[6];
> +    ctxt.dr7 = v->arch.debugreg[7];
> +
> +    return hvm_save_entry(CPU, v->vcpu_id, h, &ctxt);
> +}
> +
>  static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>  {
>      struct vcpu *v;
> -    struct hvm_hw_cpu ctxt;
> -    struct segment_register seg;
> +    int err = 0;
> 
>      for_each_vcpu ( d, v )
>      {
> -        /* We don't need to save state for a vcpu that is down; the restore
> -         * code will leave it down if there is nothing saved. */
> +        /*
> +         * We don't need to save state for a vcpu that is down; the
> +         * restore code will leave it down if there is nothing saved.
> +         */
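
Going by the changelog note above ("hvm_save_cpu_ctxt() now returns err from hvm_save_cpu_ctxt_one()"), the rest of the rewritten function presumably continues along these lines; this is a sketch, and the exact form of the VPF_down check is an assumption carried over from the quoted comment:

        /* Skip a vcpu that is down (assumed check, per the old comment). */
        if ( v->pause_flags & VPF_down )
            continue;

        /* Delegate the per-vcpu work to the new one-instance helper and
         * stop on the first error. */
        err = hvm_save_cpu_ctxt_one(v, h);
        if ( err )
            break;
    }

    return err;
}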