From: Sheng Yang <[EMAIL PROTECTED]>
Date: Fri, 29 Aug 2008 14:02:29 +0800
Subject: [PATCH] KVM: MMU: Add shadow_accessed_shift
Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
arch/x86/kvm/mmu.c | 9 ++++++---
1 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3008279..0997d82 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -151,6 +151,7 @@ static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
+static u16 __read_mostly shadow_accessed_shift;
static u64 __read_mostly shadow_dirty_mask;
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
@@ -171,6 +172,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
{
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
+ shadow_accessed_shift = find_first_bit((unsigned long *)&accessed_mask,
+ sizeof(accessed_mask) * 8);
shadow_dirty_mask = dirty_mask;
shadow_nx_mask = nx_mask;
shadow_x_mask = x_mask;
@@ -709,10 +712,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
int _young;
u64 _spte = *spte;
BUG_ON(!(_spte & PT_PRESENT_MASK));
- _young = _spte & PT_ACCESSED_MASK;
+ _young = _spte & shadow_accessed_mask;
if (_young) {
young = 1;
- clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ clear_bit(shadow_accessed_shift, (unsigned long *)spte);
}
spte = rmap_next(kvm, rmapp, spte);
}
@@ -1785,7 +1788,7 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
&& shadow_accessed_mask
&& !(*spte & shadow_accessed_mask)
&& is_shadow_present_pte(*spte))
- set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ set_bit(shadow_accessed_shift, (unsigned long *)spte);
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
--
1.5.4.5
From 3a2cc947a656a6eb4e815e64a44cb3e77a162a89 Mon Sep 17 00:00:00 2001
From: Sheng Yang <[EMAIL PROTECTED]>
Date: Fri, 29 Aug 2008 14:02:29 +0800
Subject: [PATCH] KVM: MMU: Add shadow_accessed_shift
Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
arch/x86/kvm/mmu.c | 9 ++++++---
1 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3008279..0997d82 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -151,6 +151,7 @@ static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
+static u16 __read_mostly shadow_accessed_shift;
static u64 __read_mostly shadow_dirty_mask;
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
@@ -171,6 +172,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
{
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
+ shadow_accessed_shift = find_first_bit((unsigned long *)&accessed_mask,
+ sizeof(accessed_mask) * 8);
shadow_dirty_mask = dirty_mask;
shadow_nx_mask = nx_mask;
shadow_x_mask = x_mask;
@@ -709,10 +712,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
int _young;
u64 _spte = *spte;
BUG_ON(!(_spte & PT_PRESENT_MASK));
- _young = _spte & PT_ACCESSED_MASK;
+ _young = _spte & shadow_accessed_mask;
if (_young) {
young = 1;
- clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ clear_bit(shadow_accessed_shift, (unsigned long *)spte);
}
spte = rmap_next(kvm, rmapp, spte);
}
@@ -1785,7 +1788,7 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
&& shadow_accessed_mask
&& !(*spte & shadow_accessed_mask)
&& is_shadow_present_pte(*spte))
- set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ set_bit(shadow_accessed_shift, (unsigned long *)spte);
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
--
1.5.4.5