From: Sheng Yang <[EMAIL PROTECTED]>

We use a "fake" A/D bit for EPT, to keep epte behaviour consistent with shadow
spte. But it's not that good for the MMU notifier: now we can only expect
clear_flush_young() to return young=0 in most conditions.

Also fix an improper check based on shadow_accessed_mask == 0 for EPT.

Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
 arch/x86/kvm/mmu.c |   10 ++++++----
 1 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9bc31fc..1e9f9b4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -158,6 +158,7 @@ static u64 __read_mostly shadow_nx_mask;
 static u64 __read_mostly shadow_x_mask;        /* mutual exclusive with 
nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
+static u16 __read_mostly shadow_accessed_shift;
 static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
@@ -178,6 +179,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 {
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
+       shadow_accessed_shift = find_first_bit((unsigned long *)&accessed_mask,
+                                              8 * sizeof(accessed_mask));
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
@@ -716,10 +719,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long 
*rmapp)
                int _young;
                u64 _spte = *spte;
                BUG_ON(!(_spte & PT_PRESENT_MASK));
-               _young = _spte & PT_ACCESSED_MASK;
+               _young = _spte & shadow_accessed_mask;
                if (_young) {
                        young = 1;
-                       clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+                       clear_bit(shadow_accessed_shift, (unsigned long *)spte);
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
@@ -1789,10 +1792,9 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, 
gfn_t gfn)
 
        if (spte
            && vcpu->arch.last_pte_gfn == gfn
-           && shadow_accessed_mask
            && !(*spte & shadow_accessed_mask)
            && is_shadow_present_pte(*spte))
-               set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+               set_bit(shadow_accessed_shift, (unsigned long *)spte);
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-- 
1.5.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to