Skip pinned shadow pages when selecting pages to zap.

While scanning active_mmu_pages from the tail (oldest entries first),
park pinned pages on a temporary list and splice them back once a
zappable page has been found (or the scan ends), so that reclaim always
picks the oldest unpinned shadow page.

Signed-off-by: Marcelo Tosatti <[email protected]>
---
arch/x86/kvm/mmu.c | 26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
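
(Not part of the patch, just an illustration for review.) Below is a
minimal, self-contained userspace sketch of the selection loop in the
hunk that follows. The list helpers are hand-rolled stand-ins for the
kernel's <linux/list.h> list_add/list_move/list_splice, and struct page,
pick_oldest_unpinned and the page ids are invented for this example; the
point is only the pattern of parking pinned entries on a side list while
scanning from the tail and splicing them back before zapping or
returning.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* move one entry to the front of another list */
static void list_move(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add(entry, head);
}

/* splice a whole non-empty list in front of @head's entries */
static void list_splice(struct list_head *list, struct list_head *head)
{
	struct list_head *first = list->next, *last = list->prev;

	first->prev = head;
	last->next = head->next;
	head->next->prev = last;
	head->next = first;
}

struct page {
	int id;
	bool pinned;
	struct list_head link;
};

/*
 * Scan from the tail (oldest entry first), parking pinned pages on a
 * side list, then splice them back once a victim is found or the scan
 * ends.  list_splice() is needed for the put-back: list_move() of the
 * on-stack head would only move the head node itself and leave the
 * parked entries on a detached ring.
 */
static struct page *pick_oldest_unpinned(struct list_head *active)
{
	struct page *victim = NULL;
	LIST_HEAD(pinned_list);

	while (!list_empty(active)) {
		struct page *p = container_of(active->prev, struct page, link);

		if (p->pinned) {
			list_move(&p->link, &pinned_list);
			continue;
		}
		victim = p;
		break;
	}
	if (!list_empty(&pinned_list))
		list_splice(&pinned_list, active);
	return victim;
}

int main(void)
{
	LIST_HEAD(active);
	struct page pages[4] = {
		{ 0, false }, { 1, true }, { 2, false }, { 3, true },
	};

	/* page 0 ends up newest (head side), page 3 oldest (tail side) */
	for (int i = 3; i >= 0; i--)
		list_add(&pages[i].link, &active);

	struct page *victim = pick_oldest_unpinned(&active);
	printf("victim: page %d\n", victim ? victim->id : -1);	/* page 2 */
	return 0;
}

For the list built in main() (page 3 oldest and pinned), the scan skips
page 3, picks page 2, and page 3 ends up back on the active list.
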
Index: kvm.pinned-sptes/arch/x86/kvm/mmu.c
===================================================================
--- kvm.pinned-sptes.orig/arch/x86/kvm/mmu.c	2014-07-09 12:09:26.433674438 -0300
+++ kvm.pinned-sptes/arch/x86/kvm/mmu.c	2014-07-09 12:09:27.164672860 -0300
@@ -2267,16 +2267,24 @@
 static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
 					struct list_head *invalid_list)
 {
-	struct kvm_mmu_page *sp;
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		return false;
+	struct kvm_mmu_page *sp, *nsp;
+	LIST_HEAD(pinned_list);
 
-	sp = list_entry(kvm->arch.active_mmu_pages.prev,
-			struct kvm_mmu_page, link);
-	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+	list_for_each_entry_safe_reverse(sp, nsp,
+				&kvm->arch.active_mmu_pages, link) {
+		if (sp->pinned) {
+			list_move(&sp->link, &pinned_list);
+			continue;
+		}
+		if (!list_empty(&pinned_list))
+			list_splice(&pinned_list, &kvm->arch.active_mmu_pages);
+		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+		return true;
+	}
 
-	return true;
+	if (!list_empty(&pinned_list))
+		list_splice(&pinned_list, &kvm->arch.active_mmu_pages);
+	return false;
 }
 
 /*
@@ -4679,6 +4687,8 @@
 	 * Notify all vcpus to reload its shadow page table
 	 * and flush TLB. Then all vcpus will switch to new
 	 * shadow page table with the new mmu_valid_gen.
+	 * The MMU reload request also forces a refault of
+	 * sptes for pinned ranges.
 	 *
 	 * Note: we should do this under the protection of
 	 * mmu-lock, otherwise, vcpu would purge shadow page
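
(Again not part of the patch.) A rough sketch of the request pattern the
added comment relies on: kvm_reload_remote_mmus() raises a per-vcpu
request, and each vcpu services it before its next guest entry,
rebuilding its shadow page table and, with this series, re-faulting
sptes for pinned ranges. Everything below (REQ_MMU_RELOAD value,
mock_vcpu, refault_pinned_ranges) is a made-up userspace mock, not KVM
code.

#include <stdio.h>

#define REQ_MMU_RELOAD	(1u << 0)	/* invented request bit */

struct mock_vcpu {
	int id;
	unsigned int requests;
};

/* roughly what a broadcast reload request does: flag every vcpu */
static void request_mmu_reload(struct mock_vcpu *vcpus, int nvcpus)
{
	for (int i = 0; i < nvcpus; i++)
		vcpus[i].requests |= REQ_MMU_RELOAD;
}

/* stand-in for re-establishing sptes that cover pinned ranges */
static void refault_pinned_ranges(struct mock_vcpu *vcpu)
{
	printf("vcpu %d: re-faulting pinned ranges\n", vcpu->id);
}

/* requests are serviced before each guest entry */
static void mock_vcpu_enter(struct mock_vcpu *vcpu)
{
	if (vcpu->requests & REQ_MMU_RELOAD) {
		vcpu->requests &= ~REQ_MMU_RELOAD;
		printf("vcpu %d: reloading shadow page table\n", vcpu->id);
		refault_pinned_ranges(vcpu);	/* what the new comment notes */
	}
	/* ... run guest ... */
}

int main(void)
{
	struct mock_vcpu vcpus[2] = { { 0, 0 }, { 1, 0 } };

	request_mmu_reload(vcpus, 2);
	for (int i = 0; i < 2; i++)
		mock_vcpu_enter(&vcpus[i]);
	return 0;
}
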
--