Re: [Xen-devel] [PATCH RFC 28/31] xen/x86: Context switch all levelling state in context_switch()

2016-01-22 Thread Jan Beulich
>>> On 16.12.15 at 22:24,  wrote:
> --- a/xen/arch/x86/cpu/amd.c
> +++ b/xen/arch/x86/cpu/amd.c
> @@ -300,6 +300,9 @@ static void __init noinline amd_init_levelling(void)
>   cpumask_defaults._6c &= (~0ULL << 32);
>   cpumask_defaults._6c |= ecx;
>   }
> +
> +if (levelling_caps)
> +ctxt_switch_levelling = amd_ctxt_switch_levelling;
>  }

Indentation.

> --- a/xen/arch/x86/cpu/common.c
> +++ b/xen/arch/x86/cpu/common.c
> @@ -86,6 +86,13 @@ static const struct cpu_dev default_cpu = {
>  };
>  static const struct cpu_dev *this_cpu = &default_cpu;
>  
> +void default_ctxt_switch_levelling(const struct domain *nextd)

static

> +{
> +/* Nop */
> +}
> +void (*ctxt_switch_levelling)(const struct domain *nextd) __read_mostly =
> +default_ctxt_switch_levelling;

While current and past gcc may accept (and honor) this placement of
the __read_mostly annotation, I think it is wrong from a strict language
syntax pov. Imo it instead ought to be

void (*__read_mostly ctxt_switch_levelling)(const struct domain *nextd) =

Also - indentation again.

> @@ -145,6 +145,13 @@ void intel_ctxt_switch_levelling(const struct domain 
> *nextd)
>   struct cpumasks *these_masks = &this_cpu(cpumasks);
>   const struct cpumasks *masks = &cpumask_defaults;
>  
> + if (cpu_has_cpuid_faulting) {
> + set_cpuid_faulting(nextd && is_pv_domain(nextd) &&
> +!is_control_domain(nextd) &&
> +!is_hardware_domain(nextd));
> + return;
> + }

Considering that you don't even probe the masking MSRs, this seems
inconsistent with your "always level the entire system" choice. As said
I'm opposed to that (i.e. the code here is fine), but at the very least
things ought to be consistent.

Jan


___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH RFC 28/31] xen/x86: Context switch all levelling state in context_switch()

2016-01-22 Thread Andrew Cooper
On 22/01/16 09:52, Jan Beulich wrote:
 On 16.12.15 at 22:24,  wrote:
>> --- a/xen/arch/x86/cpu/amd.c
>> +++ b/xen/arch/x86/cpu/amd.c
>> @@ -300,6 +300,9 @@ static void __init noinline amd_init_levelling(void)
>>  cpumask_defaults._6c &= (~0ULL << 32);
>>  cpumask_defaults._6c |= ecx;
>>  }
>> +
>> +if (levelling_caps)
>> +ctxt_switch_levelling = amd_ctxt_switch_levelling;
>>  }
> Indentation.
>
>> --- a/xen/arch/x86/cpu/common.c
>> +++ b/xen/arch/x86/cpu/common.c
>> @@ -86,6 +86,13 @@ static const struct cpu_dev default_cpu = {
>>  };
>> static const struct cpu_dev *this_cpu = &default_cpu;
>>  
>> +void default_ctxt_switch_levelling(const struct domain *nextd)
> static
>
>> +{
>> +/* Nop */
>> +}
>> +void (*ctxt_switch_levelling)(const struct domain *nextd) __read_mostly =
>> +default_ctxt_switch_levelling;
> While current and past gcc may accept (and honor) this placement of
> the __read_mostly annotation, I think it is wrong from a strict language
> syntax pov. Imo it instead ought to be
>
> void (*__read_mostly ctxt_switch_levelling)(const struct domain *nextd) =
>
> Also - indentation again.
>
>> @@ -145,6 +145,13 @@ void intel_ctxt_switch_levelling(const struct domain 
>> *nextd)
>>  struct cpumasks *these_masks = &this_cpu(cpumasks);
>>  const struct cpumasks *masks = &cpumask_defaults;
>>  
>> +if (cpu_has_cpuid_faulting) {
>> +set_cpuid_faulting(nextd && is_pv_domain(nextd) &&
>> +   !is_control_domain(nextd) &&
>> +   !is_hardware_domain(nextd));
>> +return;
>> +}
> Considering that you don't even probe the masking MSRs, this seems
> inconsistent with your "always level the entire system" choice.

In the case that faulting is available, we never want to touch masking. 
Faulting is newer and strictly superior to masking.

As documented, there is no hardware which support both.  (In reality,
there is one SKU of IvyBridge CPUs which experimentally have both.)


The fact that dom0 and the hardware domain are bypassed is a bug IMO. 
However, I am preserving the existing behaviour until phase 2 when I fix
other parts of the cpuid policy.  Currently imposing faulting on dom0
causes carnage because nothing generates it a sane policy.

~Andrew

___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH RFC 28/31] xen/x86: Context switch all levelling state in context_switch()

2016-01-22 Thread Jan Beulich
>>> On 22.01.16 at 15:19,  wrote:
> On 22/01/16 09:52, Jan Beulich wrote:
> On 16.12.15 at 22:24,  wrote:
>>> @@ -145,6 +145,13 @@ void intel_ctxt_switch_levelling(const struct domain 
>>> *nextd)
>>> struct cpumasks *these_masks = &this_cpu(cpumasks);
>>> const struct cpumasks *masks = &cpumask_defaults;
>>>  
>>> +   if (cpu_has_cpuid_faulting) {
>>> +   set_cpuid_faulting(nextd && is_pv_domain(nextd) &&
>>> +  !is_control_domain(nextd) &&
>>> +  !is_hardware_domain(nextd));
>>> +   return;
>>> +   }
>> Considering that you don't even probe the masking MSRs, this seems
>> inconsistent with your "always level the entire system" choice.
> 
> In the case that faulting is available, we never want to touch masking. 
> Faulting is newer and strictly superior to masking.
> 
> As documented, there is no hardware which support both.  (In reality,
> there is one SKU of IvyBridge CPUs which experimentally have both.)
> 
> 
> The fact that dom0 and the hardware domain are bypassed is a bug IMO. 

And we appear to disagree here. I'd rather see the rest of the
series match this current behavior.

> However, I am preserving the existing behaviour until phase 2 when I fix
> other parts of the cpuid policy.  Currently imposing faulting on dom0
> causes carnage because nothing generates it a sane policy.
> 
> ~Andrew




___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH RFC 28/31] xen/x86: Context switch all levelling state in context_switch()

2016-01-22 Thread Andrew Cooper
On 22/01/16 14:31, Jan Beulich wrote:
 On 22.01.16 at 15:19,  wrote:
>> On 22/01/16 09:52, Jan Beulich wrote:
>> On 16.12.15 at 22:24,  wrote:
 @@ -145,6 +145,13 @@ void intel_ctxt_switch_levelling(const struct domain 
 *nextd)
	struct cpumasks *these_masks = &this_cpu(cpumasks);
	const struct cpumasks *masks = &cpumask_defaults;
  
 +  if (cpu_has_cpuid_faulting) {
 +  set_cpuid_faulting(nextd && is_pv_domain(nextd) &&
 + !is_control_domain(nextd) &&
 + !is_hardware_domain(nextd));
 +  return;
 +  }
>>> Considering that you don't even probe the masking MSRs, this seems
>>> inconsistent with your "always level the entire system" choice.
>> In the case that faulting is available, we never want to touch masking. 
>> Faulting is newer and strictly superior to masking.
>>
>> As documented, there is no hardware which support both.  (In reality,
>> there is one SKU of IvyBridge CPUs which experimentally have both.)
>>
>>
>> The fact that dom0 and the hardware domain are bypassed is a bug IMO. 
> And we appear to disagree here. I'd rather see the rest of the
> series match this current behavior.

I am planning to fix it, but it is the same quantity of work again, on
top of this series.  I am deliberately not conflating all of the cpuid
related fixes into one series, because it is simply too much work to do
in one go.

Dom0 still gets its "feature levelled" system via emulated cpuid, just
as it does at the moment.

~Andrew

___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


[Xen-devel] [PATCH RFC 28/31] xen/x86: Context switch all levelling state in context_switch()

2015-12-16 Thread Andrew Cooper
A single ctxt_switch_levelling() function pointer is provided
(defaulting to an empty nop), which is overridden in the appropriate
$VENDOR_init_levelling().

set_cpuid_faulting() is made private and included within
intel_ctxt_switch_levelling()

Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
---
 xen/arch/x86/cpu/amd.c  |  3 +++
 xen/arch/x86/cpu/common.c   |  7 +++
 xen/arch/x86/cpu/intel.c| 14 --
 xen/arch/x86/domain.c   |  4 +---
 xen/include/asm-x86/processor.h |  2 +-
 5 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 6b95ab6..cc1344a 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -300,6 +300,9 @@ static void __init noinline amd_init_levelling(void)
cpumask_defaults._6c &= (~0ULL << 32);
cpumask_defaults._6c |= ecx;
}
+
+if (levelling_caps)
+ctxt_switch_levelling = amd_ctxt_switch_levelling;
 }
 
 /*
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 896a579..4802ce8 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -86,6 +86,13 @@ static const struct cpu_dev default_cpu = {
 };
static const struct cpu_dev *this_cpu = &default_cpu;
 
+void default_ctxt_switch_levelling(const struct domain *nextd)
+{
+/* Nop */
+}
+void (*ctxt_switch_levelling)(const struct domain *nextd) __read_mostly =
+default_ctxt_switch_levelling;
+
 bool_t opt_cpu_info;
 boolean_param("cpuinfo", opt_cpu_info);
 
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index f1bb222..87c66d2 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -32,9 +32,9 @@ static bool_t __init probe_intel_cpuid_faulting(void)
return 1;
 }
 
-static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
-void set_cpuid_faulting(bool_t enable)
+static void set_cpuid_faulting(bool_t enable)
 {
+   static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
uint32_t hi, lo;
 
if (!cpu_has_cpuid_faulting ||
@@ -145,6 +145,13 @@ void intel_ctxt_switch_levelling(const struct domain 
*nextd)
	struct cpumasks *these_masks = &this_cpu(cpumasks);
	const struct cpumasks *masks = &cpumask_defaults;
 
+   if (cpu_has_cpuid_faulting) {
+   set_cpuid_faulting(nextd && is_pv_domain(nextd) &&
+  !is_control_domain(nextd) &&
+  !is_hardware_domain(nextd));
+   return;
+   }
+
 #define LAZY(msr, field)   \
({  \
if ( msr && (these_masks->field != masks->field) )  \
@@ -208,6 +215,9 @@ static void __init noinline intel_init_levelling(void)
}
else if ( !~opt_cpuid_mask_xsave_eax )
printk("No CPUID xsave feature mask available\n");
+
+   if (levelling_caps)
+   ctxt_switch_levelling = intel_ctxt_switch_levelling;
 }
 
 static void early_init_intel(struct cpuinfo_x86 *c)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 51adb8d..e9f1af1 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1639,9 +1639,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 load_segments(next);
 }
 
-set_cpuid_faulting(is_pv_domain(nextd) &&
-   !is_control_domain(nextd) &&
-   !is_hardware_domain(nextd));
+ctxt_switch_levelling(nextd);
 }
 
 context_saved(prev);
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 6d2f09b..52e5eae 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -204,7 +204,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data cpu_data[smp_processor_id()]
 
-extern void set_cpuid_faulting(bool_t enable);
+extern void (*ctxt_switch_levelling)(const struct domain *nextd);
 
 extern u64 host_pat;
 extern bool_t opt_cpu_info;
-- 
2.1.4


___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel