Re: [Xen-devel] [PATCH v8 10/17] x86/hvm: Permit guests direct access to MSR_{SPEC_CTRL, PRED_CMD}

2018-01-15 Thread Tian, Kevin
> From: Jan Beulich [mailto:jbeul...@suse.com]
> Sent: Monday, January 15, 2018 7:11 PM
> 
> >>> On 12.01.18 at 19:01,  wrote:
> > For performance reasons, HVM guests should have direct access to these MSRs
> > when possible.
> >
> > Signed-off-by: Andrew Cooper 
> 
> Reviewed-by: Jan Beulich 
> with one spelling fix (see below)
> 
> As these are non-trivial changes to VMX and SVM code I think you
> should have Cc-ed the maintainers (now added, and leaving the
> full patch in context for them).
> 

Reviewed-by: Kevin Tian , with two other spelling fixes.

> > ---
> > v7:
> >  * Drop excess brackets
> > ---
> >  xen/arch/x86/domctl.c  | 19 +++
> >  xen/arch/x86/hvm/svm/svm.c |  5 +
> >  xen/arch/x86/hvm/vmx/vmx.c | 18 ++
> >  xen/arch/x86/msr.c |  3 ++-
> >  xen/include/asm-x86/msr.h  |  5 -
> >  5 files changed, 48 insertions(+), 2 deletions(-)
> >
> > diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> > index 72b4489..e5fdde7 100644
> > --- a/xen/arch/x86/domctl.c
> > +++ b/xen/arch/x86/domctl.c
> > @@ -53,6 +53,7 @@ static int update_domain_cpuid_info(struct domain *d,
> >  struct cpuid_policy *p = d->arch.cpuid;
> >  const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
> >  int old_vendor = p->x86_vendor;
> > +unsigned int old_7d0 = p->feat.raw[0].d, old_e8b = p->extd.raw[8].b;
> >  bool call_policy_changed = false; /* Avoid for_each_vcpu() unnecessarily */
> >
> >  /*
> > @@ -218,6 +219,14 @@ static int update_domain_cpuid_info(struct domain *d,
> >
> >  d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
> >  }
> > +
> > +/*
> > + * If the IBSRB/STIBP policy has changed, we need to recalculate the

IBRSB

> > + * MSR interception bitmaps and STIBP protection default.
> > + */
> > +call_policy_changed = ((old_7d0 ^ p->feat.raw[0].d) &
> > +   (cpufeat_mask(X86_FEATURE_IBRSB) |
> > +cpufeat_mask(X86_FEATURE_STIBP)));
> >  break;
> >
> >  case 0xa:
> > @@ -292,6 +301,16 @@ static int update_domain_cpuid_info(struct domain *d,
> >  d->arch.pv_domain.cpuidmasks->e1cd = mask;
> >  }
> >  break;
> > +
> > +case 0x80000008:
> > +/*
> > + * If the IBRB policy has changed, we need to recalculate the MSR

IBPB

> > + * interception bitmaps.
> > + */
> > +call_policy_changed = (is_hvm_domain(d) &&
> > +   ((old_e8b ^ p->extd.raw[8].b) &
> > +cpufeat_mask(X86_FEATURE_IBPB)));
> > +break;
> >  }
> >
> >  if ( call_policy_changed )
> > diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> > index c48fdfa..ee47508 100644
> > --- a/xen/arch/x86/hvm/svm/svm.c
> > +++ b/xen/arch/x86/hvm/svm/svm.c
> > @@ -617,6 +617,7 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
> >  {
> >  struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
> >  struct vmcb_struct *vmcb = arch_svm->vmcb;
> > +const struct cpuid_policy *cp = v->domain->arch.cpuid;
> >  u32 bitmap = vmcb_get_exception_intercepts(vmcb);
> >
> >  if ( opt_hvm_fep ||
> > @@ -626,6 +627,10 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
> >  bitmap &= ~(1U << TRAP_invalid_op);
> >
> >  vmcb_set_exception_intercepts(vmcb, bitmap);
> > +
> > +/* Give access to MSR_PRED_CMD if the guest has been told about it. */
> > +svm_intercept_msr(v, MSR_PRED_CMD,
> > +  cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
> >  }
> >
> >  static void svm_sync_vmcb(struct vcpu *v)
> > diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> > index e036303..8609de3 100644
> > --- a/xen/arch/x86/hvm/vmx/vmx.c
> > +++ b/xen/arch/x86/hvm/vmx/vmx.c
> > @@ -656,6 +656,8 @@ void vmx_update_exception_bitmap(struct vcpu *v)
> >
> >  static void vmx_cpuid_policy_changed(struct vcpu *v)
> >  {
> > +const struct cpuid_policy *cp = v->domain->arch.cpuid;
> > +
> >  if ( opt_hvm_fep ||
> >   (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
> >  v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
> > @@ -665,6 +667,22 @@ static void vmx_cpuid_policy_changed(struct vcpu *v)
> >  vmx_vmcs_enter(v);
> >  vmx_update_exception_bitmap(v);
> >  vmx_vmcs_exit(v);
> > +
> > +/*
> > + * We can only pass though MSR_SPEC_CTRL if the guest knows about all bits
> 
> "through"
> 
> Jan
> 
> > + * in it.  Otherwise, Xen may be fixing up in the background.
> > + */
> > +v->arch.msr->spec_ctrl.direct_access = cp->feat.ibrsb && cp->feat.stibp;
> > +if ( v->arch.msr->spec_ctrl.direct_access )
> > +

Re: [Xen-devel] [PATCH v8 10/17] x86/hvm: Permit guests direct access to MSR_{SPEC_CTRL, PRED_CMD}

2018-01-15 Thread Boris Ostrovsky



On 01/15/2018 06:11 AM, Jan Beulich wrote:

On 12.01.18 at 19:01,  wrote:

For performance reasons, HVM guests should have direct access to these MSRs
when possible.

Signed-off-by: Andrew Cooper 


Reviewed-by: Jan Beulich 
with one spelling fix (see below)

As these are non-trivial changes to VMX and SVM code I think you
should have Cc-ed the maintainers (now added, and leaving the
full patch in context for them).



Reviewed-by: Boris Ostrovsky 


[Xen-devel] [PATCH v8 10/17] x86/hvm: Permit guests direct access to MSR_{SPEC_CTRL, PRED_CMD}

2018-01-12 Thread Andrew Cooper
For performance reasons, HVM guests should have direct access to these MSRs
when possible.

Signed-off-by: Andrew Cooper 
---
v7:
 * Drop excess brackets
---
 xen/arch/x86/domctl.c  | 19 +++
 xen/arch/x86/hvm/svm/svm.c |  5 +
 xen/arch/x86/hvm/vmx/vmx.c | 18 ++
 xen/arch/x86/msr.c |  3 ++-
 xen/include/asm-x86/msr.h  |  5 -
 5 files changed, 48 insertions(+), 2 deletions(-)
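
For illustration only (not part of the patch): the pass-through rules the
hunks below implement can be condensed into a small stand-alone C sketch.
The struct and function names here are invented stand-ins, not the real Xen
types; note that on the SVM side the PRED_CMD decision keys off IBPB alone,
while the VMX side accepts either IBRSB or IBPB.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, simplified view of the CPUID policy bits involved. */
    struct policy_bits {
        bool ibrsb;  /* CPUID.7[0].EDX IBRSB - MSR_SPEC_CTRL/IBRS enumerated */
        bool stibp;  /* CPUID.7[0].EDX STIBP - STIBP bit in MSR_SPEC_CTRL   */
        bool ibpb;   /* CPUID.0x80000008.EBX IBPB - MSR_PRED_CMD enumerated */
    };

    /* SPEC_CTRL may only be passed through if the guest knows every bit,
     * as Xen otherwise has to fix things up behind the guest's back. */
    bool spec_ctrl_direct(const struct policy_bits *p)
    {
        return p->ibrsb && p->stibp;
    }

    /* PRED_CMD is write-only with no state to repair, so any enumeration
     * of the MSR (IBRSB or IBPB) is enough to drop the intercept. */
    bool pred_cmd_direct(const struct policy_bits *p)
    {
        return p->ibrsb || p->ibpb;
    }

    int main(void)
    {
        struct policy_bits p = { .ibrsb = true, .stibp = false, .ibpb = true };
        printf("SPEC_CTRL pass-through: %d\n", spec_ctrl_direct(&p));
        printf("PRED_CMD  pass-through: %d\n", pred_cmd_direct(&p));
        return 0;
    }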

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 72b4489..e5fdde7 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -53,6 +53,7 @@ static int update_domain_cpuid_info(struct domain *d,
 struct cpuid_policy *p = d->arch.cpuid;
 const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
 int old_vendor = p->x86_vendor;
+unsigned int old_7d0 = p->feat.raw[0].d, old_e8b = p->extd.raw[8].b;
 bool call_policy_changed = false; /* Avoid for_each_vcpu() unnecessarily */
 
 /*
@@ -218,6 +219,14 @@ static int update_domain_cpuid_info(struct domain *d,
 
 d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
 }
+
+/*
+ * If the IBSRB/STIBP policy has changed, we need to recalculate the
+ * MSR interception bitmaps and STIBP protection default.
+ */
+call_policy_changed = ((old_7d0 ^ p->feat.raw[0].d) &
+   (cpufeat_mask(X86_FEATURE_IBRSB) |
+cpufeat_mask(X86_FEATURE_STIBP)));
 break;
 
 case 0xa:
@@ -292,6 +301,16 @@ static int update_domain_cpuid_info(struct domain *d,
 d->arch.pv_domain.cpuidmasks->e1cd = mask;
 }
 break;
+
+case 0x80000008:
+/*
+ * If the IBRB policy has changed, we need to recalculate the MSR
+ * interception bitmaps.
+ */
+call_policy_changed = (is_hvm_domain(d) &&
+   ((old_e8b ^ p->extd.raw[8].b) &
+cpufeat_mask(X86_FEATURE_IBPB)));
+break;
 }
 
 if ( call_policy_changed )
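
The changed-bit tests added above follow the usual pattern of XORing the old
and new feature words and masking with the bits of interest, so the per-vCPU
recalculation only runs when a relevant bit actually flipped. A minimal,
stand-alone illustration (the bit positions and names are illustrative
stand-ins, not the Xen cpufeat_mask() definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define FEAT_IBRSB (UINT32_C(1) << 26)  /* illustrative positions only */
    #define FEAT_STIBP (UINT32_C(1) << 27)

    /* Non-zero iff IBRSB or STIBP differs between the old and new 7d0 word. */
    bool relevant_bits_changed(uint32_t old_7d0, uint32_t new_7d0)
    {
        return ((old_7d0 ^ new_7d0) & (FEAT_IBRSB | FEAT_STIBP)) != 0;
    }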
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c48fdfa..ee47508 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -617,6 +617,7 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
 {
 struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
 struct vmcb_struct *vmcb = arch_svm->vmcb;
+const struct cpuid_policy *cp = v->domain->arch.cpuid;
 u32 bitmap = vmcb_get_exception_intercepts(vmcb);
 
 if ( opt_hvm_fep ||
@@ -626,6 +627,10 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
 bitmap &= ~(1U << TRAP_invalid_op);
 
 vmcb_set_exception_intercepts(vmcb, bitmap);
+
+/* Give access to MSR_PRED_CMD if the guest has been told about it. */
+svm_intercept_msr(v, MSR_PRED_CMD,
+  cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
 }
 
 static void svm_sync_vmcb(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e036303..8609de3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -656,6 +656,8 @@ void vmx_update_exception_bitmap(struct vcpu *v)
 
 static void vmx_cpuid_policy_changed(struct vcpu *v)
 {
+const struct cpuid_policy *cp = v->domain->arch.cpuid;
+
 if ( opt_hvm_fep ||
  (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
 v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
@@ -665,6 +667,22 @@ static void vmx_cpuid_policy_changed(struct vcpu *v)
 vmx_vmcs_enter(v);
 vmx_update_exception_bitmap(v);
 vmx_vmcs_exit(v);
+
+/*
+ * We can only pass though MSR_SPEC_CTRL if the guest knows about all bits
+ * in it.  Otherwise, Xen may be fixing up in the background.
+ */
+v->arch.msr->spec_ctrl.direct_access = cp->feat.ibrsb && cp->feat.stibp;
+if ( v->arch.msr->spec_ctrl.direct_access )
+vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+else
+vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+
+/* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
+if ( cp->feat.ibrsb || cp->extd.ibpb )
+vmx_clear_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
+else
+vmx_set_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
 }
 
 int vmx_guest_x86_mode(struct vcpu *v)
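
The comment above is the crux of the SPEC_CTRL rule: while the MSR is
intercepted, Xen has a point at which it can merge bits the guest does not
know about (e.g. STIBP) into what the guest asked for; once the MSR is
passed through, that fix-up point disappears. A hedged sketch of the idea,
with invented names and bit definitions:

    #include <stdint.h>

    #define SPEC_CTRL_IBRS  (UINT32_C(1) << 0)
    #define SPEC_CTRL_STIBP (UINT32_C(1) << 1)

    /* Illustration only: with interception in place, the value loaded into
     * hardware can be the guest's request plus whatever Xen forces on top
     * (e.g. STIBP for a guest that has never heard of it). */
    uint32_t effective_spec_ctrl(uint32_t guest_val, uint32_t xen_forced)
    {
        return guest_val | xen_forced;
    }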
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 02a7b49..697cc6e 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -132,7 +132,8 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
 case MSR_SPEC_CTRL:
 if ( !cp->feat.ibrsb )
 goto gp_fault;
-*val = vp->spec_ctrl.guest;
+*val = (vp->spec_ctrl.direct_access
+? vp->spec_ctrl.host : vp->spec_ctrl.guest);
 break;
 
 case MSR_INTEL_PLATFORM_INFO:
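
The guest_rdmsr() change above deals with reads that still reach Xen once the
MSR is passed through (the guest's own RDMSR no longer traps): the
software-tracked "guest" copy can then be stale, so the value currently loaded
in hardware is the one to report. A simplified stand-alone sketch with
invented type names:

    #include <stdbool.h>
    #include <stdint.h>

    struct spec_ctrl_state {
        bool     direct_access; /* MSR passed straight through to the guest */
        uint64_t guest;         /* value tracked by Xen while intercepting  */
        uint64_t host;          /* value currently loaded in hardware       */
    };

    /* Report whichever field actually reflects what the guest last wrote. */
    uint64_t read_spec_ctrl(const struct spec_ctrl_state *s)
    {
        return s->direct_access ? s->host : s->guest;
    }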
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h