The commit has been pushed to "branch-rh7-3.10.0-1160.25.1.vz7.180.x-ovz" and will
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1160.25.1.vz7.180.7
------>
commit 0ae04afa6618c6394a767c3e280c4e4d160a92f0
Author: Vasily Averin <[email protected]>
Date:   Tue May 25 10:00:18 2021 +0300

    fixup2 for "ms/KVM: AMD Milan Support"
    
    Fixed space indentation in svm_adjust_mmio_mask()
    and reverted incorrect upstream backports.
    
    https://jira.sw.ru/browse/PSBM-129674
    Signed-off-by: Vasily Averin <[email protected]>
---
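Reviewer note (not part of the commit message): the revert restores the
pre-backport two-argument form of kvm_mmu_set_mmio_spte_mask(); after this
patch all three callers pass only the mask and the value, as the hunks
below show:

    svm.c: kvm_mmu_set_mmio_spte_mask(mask, mask);
    vmx.c: kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, VMX_EPT_MISCONFIG_WX_VALUE);
    x86.c: kvm_mmu_set_mmio_spte_mask(mask, mask);
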
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/mmu.c              | 13 ++++-------
 arch/x86/kvm/mmu.h              |  2 +-
 arch/x86/kvm/svm.c              | 52 ++++++++++++++++++++---------------------
 arch/x86/kvm/vmx.c              |  2 +-
 arch/x86/kvm/x86.c              |  5 +---
 6 files changed, 34 insertions(+), 41 deletions(-)

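For context, here is a minimal standalone sketch of the mask arithmetic
restored in svm_adjust_mmio_mask() below. The C-bit position (47) and the
physical address width (43) are assumed example values for illustration
only; the real code reads them from CPUID leaf 0x8000001f and
boot_cpu_data:

    #include <stdint.h>
    #include <stdio.h>

    /* Same helper as rsvd_bits() in arch/x86/kvm/mmu.h: bits s..e set. */
    static uint64_t rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }

    int main(void)
    {
            unsigned int enc_bit = 47;              /* assumed SEV C-bit */
            unsigned int mask_bit = 43;             /* assumed x86_phys_bits */
            const uint64_t PT_PRESENT_MASK = 1ULL;  /* bit 0, as in the kernel */
            uint64_t mask;

            /* Step past the encryption bit if they collide (not here). */
            if (enc_bit == mask_bit)
                    mask_bit++;

            /*
             * mask_bit < 52: bits 43..51 sit above the physical address
             * limit and are always reserved, so an MMIO SPTE built from
             * this mask faults with PFER.RSV = 1.
             */
            mask = mask_bit < 52 ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

            printf("mmio mask/value: 0x%016llx\n", (unsigned long long)mask);
            return 0;
    }

With these example values the program prints 0x000ff80000000001 (bits 51:43
plus the present bit); this is what ends up passed as both arguments in
kvm_mmu_set_mmio_spte_mask(mask, mask).
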
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 822e16e..4f72e16 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -192,7 +192,6 @@ enum {
  * with the SVE bit in EPT PTEs.
  */
 #define SPTE_SPECIAL_MASK (1ULL << 62)
-#define SPTE_MMIO_MASK (3ULL << 52)
 
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC   0
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b4aa684..dc28b53 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -237,7 +237,6 @@ static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_mmio_value;
-static u64 __read_mostly shadow_mmio_access_mask;
 static u64 __read_mostly shadow_present_mask;
 static u64 __read_mostly shadow_me_mask;
 
@@ -267,13 +266,12 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 #define CREATE_TRACE_POINTS
 #include "mmutrace.h"
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
-       BUG_ON((u64)(unsigned int)access_mask != access_mask);
        BUG_ON((mmio_mask & mmio_value) != mmio_value);
-       shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
+       shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
        shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
-       shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
@@ -360,7 +358,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
        unsigned int gen = kvm_current_mmio_generation(vcpu);
        u64 mask = generation_mmio_spte_mask(gen);
 
-       access &= shadow_mmio_access_mask;
        access &= ACC_WRITE_MASK | ACC_USER_MASK;
        mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
 
@@ -382,7 +379,7 @@ static gfn_t get_mmio_spte_gfn(u64 spte)
 static unsigned get_mmio_spte_access(u64 spte)
 {
        u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-       return (spte & ~mask) & ~PAGE_MASK & shadow_mmio_access_mask;
+       return (spte & ~mask) & ~PAGE_MASK;
 }
 
 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
@@ -3262,7 +3259,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
        }
 
        if (unlikely(is_noslot_pfn(pfn)))
-               vcpu_cache_mmio_info(vcpu, gva, gfn, access & shadow_mmio_access_mask);
+               vcpu_cache_mmio_info(vcpu, gva, gfn, access);
 
        ret = false;
 exit:
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 592bbd1..b72acc2 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -52,7 +52,7 @@ static inline u64 rsvd_bits(int s, int e)
        return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
 
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b08afce..61319e4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1160,37 +1160,37 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
  */
 static __init void svm_adjust_mmio_mask(void)
 {
-       unsigned int enc_bit, mask_bit;
-       u64 msr, mask;
+       unsigned int enc_bit, mask_bit;
+       u64 msr, mask;
 
-       /* If there is no memory encryption support, use existing mask */
-       if (cpuid_eax(0x80000000) < 0x8000001f)
-       return;
+       /* If there is no memory encryption support, use existing mask */
+       if (cpuid_eax(0x80000000) < 0x8000001f)
+               return;
 
-       /* If memory encryption is not enabled, use existing mask */
-       rdmsrl(MSR_K8_SYSCFG, msr);
-       if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
-       return;
+       /* If memory encryption is not enabled, use existing mask */
+       rdmsrl(MSR_K8_SYSCFG, msr);
+       if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+               return;
 
-       enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
-       mask_bit = boot_cpu_data.x86_phys_bits;
+       enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
+       mask_bit = boot_cpu_data.x86_phys_bits;
 
-       /* Increment the mask bit if it is the same as the encryption bit */
-       if (enc_bit == mask_bit)
-              mask_bit++;
+       /* Increment the mask bit if it is the same as the encryption bit */
+       if (enc_bit == mask_bit)
+               mask_bit++;
 
-       /*
-       * If the mask bit location is below 52, then some bits above the
-       * physical addressing limit will always be reserved, so use the
-       * rsvd_bits() function to generate the mask. This mask, along with
-       * the present bit, will be used to generate a page fault with
-       * PFER.RSV = 1.
-       *
-       * If the mask bit location is 52 (or above), then clear the mask.
-       */
-       mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
-
-       kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
+       /*
+        * If the mask bit location is below 52, then some bits above the
+        * physical addressing limit will always be reserved, so use the
+        * rsvd_bits() function to generate the mask. This mask, along with
+        * the present bit, will be used to generate a page fault with
+        * PFER.RSV = 1.
+        *
+        * If the mask bit location is 52 (or above), then clear the mask.
+        */
+       mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
+
+       kvm_mmu_set_mmio_spte_mask(mask, mask);
 }
 
 static __init int svm_hardware_setup(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d4e3f35..2557796 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5512,7 +5512,7 @@ static void ept_set_mmio_spte_mask(void)
         * of an EPT paging-structure entry is 110b (write/execute).
         */
        kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
-                                  VMX_EPT_MISCONFIG_WX_VALUE, 0);
+                                  VMX_EPT_MISCONFIG_WX_VALUE);
 }
 
 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6e6e082..63063dc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6281,9 +6281,6 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
 
-#define ACC_WRITE_MASK   PT_WRITABLE_MASK
-#define ACC_USER_MASK    PT_USER_MASK
-
 static void kvm_set_mmio_spte_mask(void)
 {
        u64 mask;
@@ -6308,7 +6305,7 @@ static void kvm_set_mmio_spte_mask(void)
                mask &= ~1ull;
 #endif
 
-       kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+       kvm_mmu_set_mmio_spte_mask(mask, mask);
 }
 
 #ifdef CONFIG_X86_64