On 09/23/2015 05:36 PM, Paolo Bonzini wrote:


On 23/09/2015 09:56, Borislav Petkov wrote:
On Tue, Sep 22, 2015 at 11:04:38PM +0200, Paolo Bonzini wrote:
Let's add more debugging output:

Here you go:

[   50.474002] walk_shadow_page_get_mmio_spte: detect reserved bits on spte, addr 0xb8000 (level 4, 0xf0000000000f8)
[   50.484249] walk_shadow_page_get_mmio_spte: detect reserved bits on spte, addr 0xb8000 (level 3, 0xf000000000078)
[   50.494492] walk_shadow_page_get_mmio_spte: detect reserved bits on spte, addr 0xb8000 (level 2, 0xf000000000078)

And another patch, which both cranks up the debugging a bit and
tries another fix:

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index dd05b9cef6ae..b2f49bb15ba1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -105,8 +105,15 @@ static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
  static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
  {
        struct kvm_cpuid_entry2 *best;
+       static bool first = true;

        best = kvm_find_cpuid_entry(vcpu, 0, 0);
+       if (first && best) {
+               printk("cpuid(0).ebx = %x\n", best->ebx);
+               first = false;
+       } else if (first)
+               printk_ratelimited("cpuid(0) not initialized yet\n");
+
        return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
  }

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bf1122e9c7bf..f50b280ffee1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3625,7 +3625,7 @@ static void
  __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                        struct rsvd_bits_validate *rsvd_check,
                        int maxphyaddr, int level, bool nx, bool gbpages,
-                       bool pse)
+                       bool pse, bool amd)
  {
        u64 exb_bit_rsvd = 0;
        u64 gbpages_bit_rsvd = 0;
@@ -3642,7 +3642,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
         * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
         * leaf entries) on AMD CPUs only.
         */
-       if (guest_cpuid_is_amd(vcpu))
+       if (amd)
                nonleaf_bit8_rsvd = rsvd_bits(8, 8);

        switch (level) {
@@ -3710,7 +3710,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
        __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
                                cpuid_maxphyaddr(vcpu), context->root_level,
                                context->nx, guest_cpuid_has_gbpages(vcpu),
-                               is_pse(vcpu));
+                               is_pse(vcpu), guest_cpuid_is_amd(vcpu));
  }

  static void
@@ -3760,13 +3760,25 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
  void
  reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
  {
+       /*
+        * Passing "true" to the last argument is okay; it adds a check
+        * on bit 8 of the SPTEs which KVM doesn't use anyway.
+        */
        __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
                                boot_cpu_data.x86_phys_bits,
                                context->shadow_root_level, context->nx,
-                               guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+                               guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+                               true);
  }
  EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);

+static inline bool
+boot_cpu_is_amd(void)
+{
+       WARN_ON_ONCE(!tdp_enabled);
+       return shadow_x_mask != 0;

shadow_x_mask != 0 means the host is an Intel CPU (the executable bit is only set up for EPT), so this check is inverted.

Borislav, could you please check shadow_x_mask == 0 instead and test it again?
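To make the suggestion concrete, here is a minimal sketch of the corrected helper (same name and WARN as in the patch above, only the comparison flipped):

static inline bool
boot_cpu_is_amd(void)
{
	/*
	 * shadow_x_mask is only non-zero when EPT is in use, i.e. on an
	 * Intel host; with TDP enabled, a zero mask therefore means AMD.
	 */
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}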

Furthermore, using guest_cpuid_is_amd() to detect the hardware CPU vendor is wrong, since userspace can fool KVM with arbitrary guest CPUID entries. We should test the host CPUID or introduce an intel/amd callback instead.
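For example (just an untested sketch; the helper name host_cpu_is_amd is only for illustration), the vendor recorded by the kernel itself could be used, which userspace cannot influence:

#include <asm/processor.h>

static inline bool host_cpu_is_amd(void)
{
	/*
	 * boot_cpu_data.x86_vendor comes from the host's own CPUID pass,
	 * not from the guest CPUID entries set up by userspace.
	 */
	return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
}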
