Marcelo Tosatti wrote:
Cache the unsynced children information in a per-page bitmap.
static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
@@ -946,33 +978,57 @@ static void nonpaging_invlpg(struct kvm_
static int mmu_unsync_walk(struct kvm_mmu_page *parent, mmu_unsync_fn fn,
void *priv)
{
- int i, ret;
- struct kvm_mmu_page *sp = parent;
+ int ret, level, i;
+ u64 ent;
+ struct kvm_mmu_page *sp, *child;
+ struct walk {
+ struct kvm_mmu_page *sp;
+ int pos;
+ } walk[PT64_ROOT_LEVEL];
- while (parent->unsync_children) {
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
- u64 ent = sp->spt[i];
+ WARN_ON(parent->role.level == PT_PAGE_TABLE_LEVEL);
+
+ if (!parent->unsync_children)
+ return 0;
+
+ memset(&walk, 0, sizeof(walk));
+ level = parent->role.level;
+ walk[level-1].sp = parent;
+
+ do {
+ sp = walk[level-1].sp;
+		i = find_next_bit(sp->unsync_child_bitmap, 512, walk[level-1].pos);
+ if (i < 512) {
+ walk[level-1].pos = i+1;
+ ent = sp->spt[i];
if (is_shadow_present_pte(ent)) {
- struct kvm_mmu_page *child;
child = page_header(ent & PT64_BASE_ADDR_MASK);
if (child->unsync_children) {
- sp = child;
- break;
+ --level;
+ walk[level-1].sp = child;
+ walk[level-1].pos = 0;
+ continue;
}
if (child->unsync) {
ret = fn(child, priv);
+ __clear_bit(i, sp->unsync_child_bitmap);
if (ret)
return ret;
}
}
+ __clear_bit(i, sp->unsync_child_bitmap);
+ } else {
+ ++level;
+			if (find_first_bit(sp->unsync_child_bitmap, 512) == 512) {
+ sp->unsync_children = 0;
+ if (level-1 < PT64_ROOT_LEVEL)
+ walk[level-1].pos = 0;
+ }
}
- if (i == PT64_ENT_PER_PAGE) {
- sp->unsync_children = 0;
- sp = parent;
- }
- }
+ } while (level <= parent->role.level);
+
return 0;
}
<weeps>
--- kvm.orig/include/asm-x86/kvm_host.h
+++ kvm/include/asm-x86/kvm_host.h
@@ -201,6 +201,7 @@ struct kvm_mmu_page {
u64 *parent_pte; /* !multimapped */
struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
};
+ DECLARE_BITMAP(unsync_child_bitmap, 512);
};
Later, we can throw this bitmap out to a separate object. Also, it may
make sense to replace it with an array of u16s.
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html