Currently, when we fetch an spte, we only verify that gptes match those
the walker saw when we build new shadow pages for them.

However, this misses the following race:

  vcpu1                vcpu2

  walk
                       change gpte
                       walk
                       instantiate sp

  fetch existing sp

Fix by validating every gpte, regardless of whether it is used for building
a new sp or not.
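
To make the race concrete, here is a stand-alone toy sketch (all names
are stand-ins, not KVM code; validate() plays the role of
FNAME(validate_indirect_spte)) of why fetch must re-check the gpte even
when it finds an existing sp:

  /* race.c - illustrative only, not KVM code; build with: cc race.c */
  #include <stdbool.h>
  #include <stdio.h>

  static int guest_gpte = 1;           /* the guest page table entry */

  struct walk { int gpte_seen; };      /* what vcpu1's walker recorded */

  /* Stand-in for FNAME(validate_indirect_spte): is the gpte still the
   * one our walk saw?  If not, the fetch must be restarted. */
  static bool validate(const struct walk *w)
  {
          return guest_gpte == w->gpte_seen;
  }

  int main(void)
  {
          struct walk w = { .gpte_seen = guest_gpte };  /* vcpu1: walk */

          guest_gpte = 2;  /* vcpu2: change gpte, walk, instantiate sp */

          /* vcpu1: fetch finds vcpu2's sp already present.  Before this
           * patch, the is_shadow_present_pte() early-continue skipped
           * validation entirely; now every gpte is checked. */
          if (!validate(&w))
                  printf("stale gpte detected, restart the fetch\n");

          return 0;
  }

Note that link_shadow_page() still only runs for pages this fetch
itself instantiated, which is what the new new_page flag tracks.
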
Signed-off-by: Avi Kivity <[email protected]>
---
arch/x86/kvm/paging_tmpl.h | 18 ++++++++++--------
1 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a7f8295..4bbd0c7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -325,7 +325,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 int *ptwrite, pfn_t pfn)
 {
 	unsigned access = gw->pt_access;
-	struct kvm_mmu_page *sp;
+	struct kvm_mmu_page *uninitialized_var(sp);
 	u64 *sptep = NULL;
 	int uninitialized_var(level);
 	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
@@ -343,18 +343,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	     shadow_walk_okay(&iterator) && iterator.level > gw->level;
 	     shadow_walk_next(&iterator)) {
 		gfn_t table_gfn;
+		bool new_page = false;
 
 		level = iterator.level;
 		sptep = iterator.sptep;
 
 		drop_spte_if_large(vcpu, sptep);
 
-		if (is_shadow_present_pte(*sptep))
-			continue;
-
-		table_gfn = gw->table_gfn[level - 2];
-		sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-				      false, access, sptep);
+		if (!is_shadow_present_pte(*sptep)) {
+			table_gfn = gw->table_gfn[level - 2];
+			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+					      false, access, sptep);
+			new_page = true;
+		}
 
 		if (!FNAME(validate_indirect_spte)(vcpu, sptep, sp,
 						   gw, level)) {
@@ -362,7 +363,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			return NULL;
 		}
 
-		link_shadow_page(sptep, sp);
+		if (new_page)
+			link_shadow_page(sptep, sp);
 	}
 
 	for (;
--
1.7.1