This patch makes no semantic change; it only prepares for the introduction
of a second address space for system management mode.

A new function, x86_set_memory_region (together with __x86_set_memory_region,
its counterpart to be called with slots_lock already taken), is introduced
in order to operate on all address spaces when adding or deleting private
memory slots.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
v1-v2: new
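
As the commit message notes, __x86_set_memory_region is the variant called
with slots_lock already taken, while x86_set_memory_region is expected to
take the lock itself. A minimal sketch of that split, assuming the usual
kernel convention (this is not the actual body of the x86.c change):

int x86_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	/* Sketch only: take slots_lock and defer to the locked variant. */
	mutex_lock(&kvm->slots_lock);
	r = __x86_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);

	return r;
}

Callers that already hold slots_lock would invoke __x86_set_memory_region
directly instead of the wrapper.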
 arch/x86/include/asm/kvm_host.h |  5 +++
 arch/x86/kvm/mmu.c              | 84 ++---
 arch/x86/kvm/vmx.c              |  6 +--
 arch/x86/kvm/x86.c              | 40 ++--
 4 files changed, 91 insertions(+), 44 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2fd420255c2f..5a5e13af6e03 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1189,4 +1189,9 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc,
 		     u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+int __x86_set_memory_region(struct kvm *kvm,
+			    const struct kvm_userspace_memory_region *mem);
+int x86_set_memory_region(struct kvm *kvm,
+			  const struct kvm_userspace_memory_region *mem);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ad67b56c6832..a749490bc1db 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1503,30 +1503,33 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	struct slot_rmap_walk_iterator iterator;
 	int ret = 0;
+	int i;
 
-	slots = kvm_memslots(kvm);
-
-	kvm_for_each_memslot(memslot, slots) {
-		unsigned long hva_start, hva_end;
-		gfn_t gfn_start, gfn_end;
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot(memslot, slots) {
+			unsigned long hva_start, hva_end;
+			gfn_t gfn_start, gfn_end;
 
-		hva_start = max(start, memslot->userspace_addr);
-		hva_end = min(end, memslot->userspace_addr +
-			      (memslot->npages << PAGE_SHIFT));
-		if (hva_start >= hva_end)
-			continue;
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
-				PT_MAX_HUGEPAGE_LEVEL, gfn_start, gfn_end - 1,
-				iterator)
-			ret |= handler(kvm, iterator.rmap, memslot,
-				       iterator.gfn, iterator.level, data);
+			hva_start = max(start, memslot->userspace_addr);
+			hva_end = min(end, memslot->userspace_addr +
+				      (memslot->npages << PAGE_SHIFT));
+			if (hva_start >= hva_end)
+				continue;
+			/*
+			 * {gfn(page) | page intersects with [hva_start,
+			 *				      hva_end)} =
+			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+			 */
+			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
+			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1,
+						     memslot);
+
+			for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
+						 PT_MAX_HUGEPAGE_LEVEL,
+						 gfn_start, gfn_end - 1,
+						 iterator)
+				ret |= handler(kvm, iterator.rmap, memslot,
+					       iterator.gfn, iterator.level,
+					       data);
+		}
 	}
 
 	return ret;
@@ -4536,21 +4539,23 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-
-	slots = kvm_memslots(kvm);
+	int i;
 
 	spin_lock(&kvm->mmu_lock);
-	kvm_for_each_memslot(memslot, slots) {
-		gfn_t start, end;
-
-		start = max(gfn_start, memslot->base_gfn);
-		end = min(gfn_end, memslot->base_gfn + memslot->npages);
-		if (start >= end)
-			continue;
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot(memslot, slots) {
+			gfn_t start, end;
+
+