Re: [PATCH v2 3/8] x86: Grant AMX permission for guest

2022-02-25 Thread Yang Zhong
On Thu, Feb 17, 2022 at 02:44:10PM +0100, Paolo Bonzini wrote:
> On 2/17/22 06:58, Yang Zhong wrote:
> >>+
> >>+if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
> >>+bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
> >>+if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {
> >Paolo, last time you suggested the following change here:
> >
> >rc = kvm_arch_get_supported_cpuid(s, 0xd, 0,
> >   (xdata_bit < 32 ? R_EAX : R_EDX));
> >if (!(rc & BIT(xdata_bit & 31))) {
> >   ...
> >}
> >
> >   Since I used "mask" as the parameter here, I had to use R_EAX directly.
> >   Please review, and if it needs to be changed to "(xdata_bit < 32 ? R_EAX : R_EDX)",
> >   I will change it in the next version, thanks!
> 
> I looked at this function more closely because it also didn't compile on
> non-Linux systems.  I think it's better to write it now in a way that already
> plans for more dynamic features.  In the code below, I'm also relying on
> KVM_GET_SUPPORTED_CPUID/KVM_X86_COMP_GUEST_SUPP being executed
> before ARCH_REQ_XCOMP_GUEST_PERM, which therefore cannot fail.
> 
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index 377d993438..1d0c006077 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -43,8 +43,6 @@
>  #include "disas/capstone.h"
>  #include "cpu-internal.h"
> -#include <sys/syscall.h>
> -
>  /* Helpers for building CPUID[2] descriptors: */
>  struct CPUID2CacheDescriptorInfo {
> @@ -6002,40 +6000,6 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
>  }
>  }
> -static void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
> -{
> -KVMState *s = kvm_state;
> -uint64_t bitmask;
> -long rc;
> -
> -if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
> -bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
> -if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {
> -warn_report("no amx support from supported_xcr0, "
> -"bitmask:0x%lx", bitmask);
> -return;
> -}
> -
> -rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
> -  XSTATE_XTILE_DATA_BIT);
> -if (rc) {
> -/*
> - * The older kernel version(<5.15) can't support
> - * ARCH_REQ_XCOMP_GUEST_PERM and directly return.
> - */
> -return;
> -}
> -
> -rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
> -if (rc) {
> -warn_report("prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
> -} else if (!(bitmask & XFEATURE_XTILE_MASK)) {
> -warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
> -"and bitmask=0x%lx", bitmask);
> -}
> -}
> -}
> -
>  /* Calculate XSAVE components based on the configured CPU feature flags */
>  static void x86_cpu_enable_xsave_components(X86CPU *cpu)
>  {
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index d4ad0f56bd..de949bd63d 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -551,11 +551,8 @@ typedef enum X86Seg {
>  #define XSTATE_PKRU_MASK(1ULL << XSTATE_PKRU_BIT)
>  #define XSTATE_XTILE_CFG_MASK   (1ULL << XSTATE_XTILE_CFG_BIT)
>  #define XSTATE_XTILE_DATA_MASK  (1ULL << XSTATE_XTILE_DATA_BIT)
> -#define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK \
> - | XSTATE_XTILE_DATA_MASK)
> -#define ARCH_GET_XCOMP_GUEST_PERM   0x1024
> -#define ARCH_REQ_XCOMP_GUEST_PERM   0x1025
> +#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK)
>  #define ESA_FEATURE_ALIGN64_BIT 1
> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
> index 3bdcd724c4..4b07778970 100644
> --- a/target/i386/kvm/kvm.c
> +++ b/target/i386/kvm/kvm.c
> @@ -17,6 +17,7 @@
>  #include "qapi/error.h"
>  #include <sys/ioctl.h>
>  #include <sys/utsname.h>
> +#include <sys/syscall.h>
>  #include <linux/kvm.h>
>  #include "standard-headers/asm-x86/kvm_para.h"
> @@ -5168,3 +5169,39 @@ bool kvm_arch_cpu_check_are_resettable(void)
>  {
>  return !sev_es_enabled();
>  }
> +
> +#define ARCH_REQ_XCOMP_GUEST_PERM   0x1025
> +
> +void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
> +{
> +KVMState *s = kvm_state;
> +uint64_t supported;
> +
> +mask &= XSTATE_DYNAMIC_MASK;
> +if (!mask) {
> + return;
> +}
> +/*
> + * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
> + * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
> + * about them already because they are not supported features.
> + */
> +supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
> +supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
> +mask &= ~supported;


  Paolo, thanks for your great help!
  Apart from changing "mask &= ~supported" to "mask &= supported", this patch works well.
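
  A minimal standalone illustration of that fix, using hypothetical values and
  compiled apart from QEMU: with "mask &= supported" the dynamic bits the host
  reports survive and get requested, while the draft's "mask &= ~supported"
  would clear exactly those bits.

#include <stdint.h>
#include <stdio.h>

#define XSTATE_XTILE_DATA_MASK  (1ULL << 18)            /* AMX XTILEDATA */
#define XSTATE_DYNAMIC_MASK     (XSTATE_XTILE_DATA_MASK)

int main(void)
{
    /* Pretend CPUID[EAX=0xD,ECX=0] reported AMX tile data as supported. */
    uint64_t supported = XSTATE_XTILE_DATA_MASK;
    uint64_t mask = XSTATE_DYNAMIC_MASK;

    uint64_t fixed = mask & supported;    /* 0x40000: AMX gets requested */
    uint64_t draft = mask & ~supported;   /* 0x0: nothing would be requested */

    printf("fixed=0x%llx draft=0x%llx\n",
           (unsigned long long)fixed, (unsigned long long)draft);
    return 0;
}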

  Please re-sync the Linux headers, since David has

Re: [PATCH v2 3/8] x86: Grant AMX permission for guest

2022-02-17 Thread Paolo Bonzini

On 2/17/22 06:58, Yang Zhong wrote:

+
+if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
+bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
+if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {

Paolo, last time you suggested the following change here:

rc = kvm_arch_get_supported_cpuid(s, 0xd, 0,
   (xdata_bit < 32 ? R_EAX : R_EDX));
if (!(rc & BIT(xdata_bit & 31))) {
   ...
}

   Since I used "mask" as the parameter here, I had to use R_EAX directly.
   Please review, and if it needs to be changed to "(xdata_bit < 32 ? R_EAX : R_EDX)",
   I will change it in the next version, thanks!


I looked at this function more closely because it also didn't compile on
non-Linux systems.  I think it's better to write it now in a way that already
plans for more dynamic features.  In the code below, I'm also relying on
KVM_GET_SUPPORTED_CPUID/KVM_X86_COMP_GUEST_SUPP being executed
before ARCH_REQ_XCOMP_GUEST_PERM, which therefore cannot fail.

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 377d993438..1d0c006077 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -43,8 +43,6 @@
 #include "disas/capstone.h"
 #include "cpu-internal.h"
 
-#include <sys/syscall.h>

-
 /* Helpers for building CPUID[2] descriptors: */
 
 struct CPUID2CacheDescriptorInfo {

@@ -6002,40 +6000,6 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
 }
 }
 
-static void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)

-{
-KVMState *s = kvm_state;
-uint64_t bitmask;
-long rc;
-
-if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
-bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
-if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {
-warn_report("no amx support from supported_xcr0, "
-"bitmask:0x%lx", bitmask);
-return;
-}
-
-rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
-  XSTATE_XTILE_DATA_BIT);
-if (rc) {
-/*
- * The older kernel version(<5.15) can't support
- * ARCH_REQ_XCOMP_GUEST_PERM and directly return.
- */
-return;
-}
-
-rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
-if (rc) {
-warn_report("prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
-} else if (!(bitmask & XFEATURE_XTILE_MASK)) {
-warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
-"and bitmask=0x%lx", bitmask);
-}
-}
-}
-
 /* Calculate XSAVE components based on the configured CPU feature flags */
 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
 {
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index d4ad0f56bd..de949bd63d 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -551,11 +551,8 @@ typedef enum X86Seg {
 #define XSTATE_PKRU_MASK(1ULL << XSTATE_PKRU_BIT)
 #define XSTATE_XTILE_CFG_MASK   (1ULL << XSTATE_XTILE_CFG_BIT)
 #define XSTATE_XTILE_DATA_MASK  (1ULL << XSTATE_XTILE_DATA_BIT)
-#define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK \
- | XSTATE_XTILE_DATA_MASK)
 
-#define ARCH_GET_XCOMP_GUEST_PERM   0x1024

-#define ARCH_REQ_XCOMP_GUEST_PERM   0x1025
+#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK)
 
 #define ESA_FEATURE_ALIGN64_BIT 1
 
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c

index 3bdcd724c4..4b07778970 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -17,6 +17,7 @@
 #include "qapi/error.h"
 #include <sys/ioctl.h>
 #include <sys/utsname.h>
+#include <sys/syscall.h>
 
 #include <linux/kvm.h>

 #include "standard-headers/asm-x86/kvm_para.h"
@@ -5168,3 +5169,39 @@ bool kvm_arch_cpu_check_are_resettable(void)
 {
 return !sev_es_enabled();
 }
+
+#define ARCH_REQ_XCOMP_GUEST_PERM   0x1025
+
+void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
+{
+KVMState *s = kvm_state;
+uint64_t supported;
+
+mask &= XSTATE_DYNAMIC_MASK;
+if (!mask) {
+   return;
+}
+/*
+ * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
+ * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
+ * about them already because they are not supported features.
+ */
+supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
+supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
+mask &= ~supported;
+
+while (mask) {
+int bit = ctz64(mask);
+int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
+if (rc) {
+/*
+ * Older kernel versions (<5.17) do not support
+ * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
+ * any dynamic feature from kvm_arch_get_supported_cpuid.
+ */
+warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
+"for feature bit 

Re: [PATCH v2 3/8] x86: Grant AMX permission for guest

2022-02-16 Thread Yang Zhong
On Wed, Feb 16, 2022 at 10:04:29PM -0800, Yang Zhong wrote:
> The kernel allocates a 4K xstate buffer by default. For XSAVE features
> which require a large state component (e.g. AMX), the Linux kernel
> dynamically expands the xstate buffer only after the process has
> acquired the necessary permissions. Those are called dynamically-
> enabled XSAVE features (or dynamic xfeatures).
> 
> There are separate permissions for native tasks and for guests.
> 
> QEMU should request the guest permissions for the dynamic xfeatures
> that will be exposed to the guest. This only needs to be done
> once, before the first vcpu is created.
> 
> KVM implemented a new ARCH_GET_XCOMP_SUPP system attribute API to
> get the host-side supported_xcr0, so QEMU can decide whether it can
> request permission for dynamically enabled XSAVE features.
> https://lore.kernel.org/all/20220126152210.3044876-1-pbonz...@redhat.com/
> 
> Suggested-by: Paolo Bonzini 
> Signed-off-by: Yang Zhong 
> Signed-off-by: Jing Liu 
> ---
>  target/i386/cpu.h |  7 +++
>  target/i386/cpu.c | 43 +++
>  target/i386/kvm/kvm-cpu.c | 12 +--
>  target/i386/kvm/kvm.c | 20 ++
>  4 files changed, 76 insertions(+), 6 deletions(-)
> 
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index 06d2d6bccf..d4ad0f56bd 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -549,6 +549,13 @@ typedef enum X86Seg {
>  #define XSTATE_ZMM_Hi256_MASK   (1ULL << XSTATE_ZMM_Hi256_BIT)
>  #define XSTATE_Hi16_ZMM_MASK(1ULL << XSTATE_Hi16_ZMM_BIT)
>  #define XSTATE_PKRU_MASK(1ULL << XSTATE_PKRU_BIT)
> +#define XSTATE_XTILE_CFG_MASK   (1ULL << XSTATE_XTILE_CFG_BIT)
> +#define XSTATE_XTILE_DATA_MASK  (1ULL << XSTATE_XTILE_DATA_BIT)
> +#define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK \
> + | XSTATE_XTILE_DATA_MASK)
> +
> +#define ARCH_GET_XCOMP_GUEST_PERM   0x1024
> +#define ARCH_REQ_XCOMP_GUEST_PERM   0x1025
>  
>  #define ESA_FEATURE_ALIGN64_BIT 1
>  
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index ea7e8f9081..377d993438 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -43,6 +43,8 @@
>  #include "disas/capstone.h"
>  #include "cpu-internal.h"
>  
> +#include <sys/syscall.h>
> +
>  /* Helpers for building CPUID[2] descriptors: */
>  
>  struct CPUID2CacheDescriptorInfo {
> @@ -6000,12 +6002,47 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
>  }
>  }
>  
> +static void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
> +{
> +KVMState *s = kvm_state;
> +uint64_t bitmask;
> +long rc;
> +
> +if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
> +bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
> +if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {

   Paolo, last time you suggested the following change here:

   rc = kvm_arch_get_supported_cpuid(s, 0xd, 0,
  (xdata_bit < 32 ? R_EAX : R_EDX));
   if (!(rc & BIT(xdata_bit & 31))) {
  ...
   }

  Since I used "mask" as the parameter here, I had to use R_EAX directly.
  Please review, and if it needs to be changed to "(xdata_bit < 32 ? R_EAX : R_EDX)",
  I will change it in the next version, thanks!

  Yang


> +warn_report("no amx support from supported_xcr0, "
> +"bitmask:0x%lx", bitmask);
> +return;
> +}
> +
> +rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
> +  XSTATE_XTILE_DATA_BIT);
> +if (rc) {
> +/*
> + * The older kernel version(<5.15) can't support
> + * ARCH_REQ_XCOMP_GUEST_PERM and directly return.
> + */
> +return;
> +}
> +
> +rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
> +if (rc) {
> +warn_report("prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
> +} else if (!(bitmask & XFEATURE_XTILE_MASK)) {
> +warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
> +"and bitmask=0x%lx", bitmask);
> +}
> +}
> +}
> +
>  /* Calculate XSAVE components based on the configured CPU feature flags */
>  static void x86_cpu_enable_xsave_components(X86CPU *cpu)
>  {
>  CPUX86State *env = &cpu->env;
>  int i;
>  uint64_t mask;
> +static bool request_perm;
>  
>  if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
>  env->features[FEAT_XSAVE_COMP_LO] = 0;
> @@ -6021,6 +6058,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
>  }
>  }
>  
> +/* Only request permission for first vcpu */
> +if (kvm_enabled() && !request_perm) {
> +kvm_request_xsave_components(cpu, mask);
> +request_perm = true;
> +}
> +
>  env->features[FEAT_XSAVE_COMP_LO] = mask;
>  env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
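
The R_EAX/R_EDX question raised above goes away once the two CPUID halves are
folded into a single 64-bit mask, which is what Paolo's follow-up does; below
is a standalone sketch of that arithmetic with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

/* CPUID leaf 0xD, sub-leaf 0 splits the supported XCR0 bits across
 * EAX (bits 0..31) and EDX (bits 32..63); combining them lets any
 * xfeature bit be tested without picking a register per bit. */
static uint64_t combine_xcr0(uint32_t eax, uint32_t edx)
{
    return (uint64_t)edx << 32 | eax;
}

int main(void)
{
    /* Hypothetical values: AMX XTILEDATA (bit 18) reported in EAX. */
    uint64_t supported = combine_xcr0(1u << 18, 0);
    int xdata_bit = 18;

    printf("XTILEDATA supported: %s\n",
           (supported >> xdata_bit) & 1 ? "yes" : "no");
    return 0;
}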

[PATCH v2 3/8] x86: Grant AMX permission for guest

2022-02-16 Thread Yang Zhong
The kernel allocates a 4K xstate buffer by default. For XSAVE features
which require a large state component (e.g. AMX), the Linux kernel
dynamically expands the xstate buffer only after the process has
acquired the necessary permissions. Those are called dynamically-
enabled XSAVE features (or dynamic xfeatures).

There are separate permissions for native tasks and for guests.

QEMU should request the guest permissions for the dynamic xfeatures
that will be exposed to the guest. This only needs to be done
once, before the first vcpu is created.

KVM implemented a new ARCH_GET_XCOMP_SUPP system attribute API to
get the host-side supported_xcr0, so QEMU can decide whether it can
request permission for dynamically enabled XSAVE features.
https://lore.kernel.org/all/20220126152210.3044876-1-pbonz...@redhat.com/

Suggested-by: Paolo Bonzini 
Signed-off-by: Yang Zhong 
Signed-off-by: Jing Liu 
---
 target/i386/cpu.h |  7 +++
 target/i386/cpu.c | 43 +++
 target/i386/kvm/kvm-cpu.c | 12 +--
 target/i386/kvm/kvm.c | 20 ++
 4 files changed, 76 insertions(+), 6 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 06d2d6bccf..d4ad0f56bd 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -549,6 +549,13 @@ typedef enum X86Seg {
 #define XSTATE_ZMM_Hi256_MASK   (1ULL << XSTATE_ZMM_Hi256_BIT)
 #define XSTATE_Hi16_ZMM_MASK(1ULL << XSTATE_Hi16_ZMM_BIT)
 #define XSTATE_PKRU_MASK(1ULL << XSTATE_PKRU_BIT)
+#define XSTATE_XTILE_CFG_MASK   (1ULL << XSTATE_XTILE_CFG_BIT)
+#define XSTATE_XTILE_DATA_MASK  (1ULL << XSTATE_XTILE_DATA_BIT)
+#define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK \
+ | XSTATE_XTILE_DATA_MASK)
+
+#define ARCH_GET_XCOMP_GUEST_PERM   0x1024
+#define ARCH_REQ_XCOMP_GUEST_PERM   0x1025
 
 #define ESA_FEATURE_ALIGN64_BIT 1
 
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index ea7e8f9081..377d993438 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -43,6 +43,8 @@
 #include "disas/capstone.h"
 #include "cpu-internal.h"
 
+#include <sys/syscall.h>
+
 /* Helpers for building CPUID[2] descriptors: */
 
 struct CPUID2CacheDescriptorInfo {
@@ -6000,12 +6002,47 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
 }
 }
 
+static void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
+{
+KVMState *s = kvm_state;
+uint64_t bitmask;
+long rc;
+
+if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
+bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
+if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {
+warn_report("no amx support from supported_xcr0, "
+"bitmask:0x%lx", bitmask);
+return;
+}
+
+rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
+  XSTATE_XTILE_DATA_BIT);
+if (rc) {
+/*
+ * The older kernel version(<5.15) can't support
+ * ARCH_REQ_XCOMP_GUEST_PERM and directly return.
+ */
+return;
+}
+
+rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
+if (rc) {
+warn_report("prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
+} else if (!(bitmask & XFEATURE_XTILE_MASK)) {
+warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
+"and bitmask=0x%lx", bitmask);
+}
+}
+}
+
 /* Calculate XSAVE components based on the configured CPU feature flags */
 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
 {
 CPUX86State *env = &cpu->env;
 int i;
 uint64_t mask;
+static bool request_perm;
 
 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
 env->features[FEAT_XSAVE_COMP_LO] = 0;
@@ -6021,6 +6058,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
 }
 }
 
+/* Only request permission for first vcpu */
+if (kvm_enabled() && !request_perm) {
+kvm_request_xsave_components(cpu, mask);
+request_perm = true;
+}
+
 env->features[FEAT_XSAVE_COMP_LO] = mask;
 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
 }
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
index ce27d3b1df..a35a1bf9fe 100644
--- a/target/i386/kvm/kvm-cpu.c
+++ b/target/i386/kvm/kvm-cpu.c
@@ -84,7 +84,7 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu)
 static void kvm_cpu_xsave_init(void)
 {
 static bool first = true;
-KVMState *s = kvm_state;
+uint32_t eax, ebx, ecx, edx;
 int i;
 
 if (!first) {
@@ -100,11 +100,11 @@ static void kvm_cpu_xsave_init(void)
 ExtSaveArea *esa = &x86_ext_save_areas[i];
 
 if (esa->size) {
-int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX);
-if (sz != 0) {
-assert(esa->size == sz);
-esa->offset =