To be used by next patch.
Signed-off-by: Marcelo Tosatti <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/mmu.c | 11 ++++++-----
arch/x86/kvm/paging_tmpl.h | 2 +-
arch/x86/kvm/x86.c | 2 +-
4 files changed, 9 insertions(+), 8 deletions(-)
Index: kvm.pinned-sptes/arch/x86/include/asm/kvm_host.h
===================================================================
--- kvm.pinned-sptes.orig/arch/x86/include/asm/kvm_host.h	2014-06-18 17:27:47.579549247 -0300
+++ kvm.pinned-sptes/arch/x86/include/asm/kvm_host.h	2014-06-18 17:28:17.549456614 -0300
@@ -259,7 +259,7 @@
unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
- bool prefault);
+ bool prefault, bool pin, bool *pinned);
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
Index: kvm.pinned-sptes/arch/x86/kvm/mmu.c
===================================================================
--- kvm.pinned-sptes.orig/arch/x86/kvm/mmu.c	2014-06-18 17:27:47.582549238 -0300
+++ kvm.pinned-sptes/arch/x86/kvm/mmu.c 2014-06-18 17:28:17.550456611 -0300
@@ -2899,7 +2899,7 @@
static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
- gfn_t gfn, bool prefault)
+ gfn_t gfn, bool prefault, bool pin, bool *pinned)
{
int r;
int level;
@@ -3299,7 +3299,8 @@
}
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
- u32 error_code, bool prefault)
+ u32 error_code, bool prefault, bool pin,
+ bool *pinned)
{
gfn_t gfn;
int r;
@@ -3323,7 +3324,7 @@
gfn = gva >> PAGE_SHIFT;
return nonpaging_map(vcpu, gva & PAGE_MASK,
- error_code, gfn, prefault);
+ error_code, gfn, prefault, pin, pinned);
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
@@ -3373,7 +3374,7 @@
}
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
- bool prefault)
+ bool prefault, bool pin, bool *pinned)
{
pfn_t pfn;
int r;
@@ -4190,7 +4191,7 @@
int r, emulation_type = EMULTYPE_RETRY;
enum emulation_result er;
- r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+ r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false, false,
+				NULL);
if (r < 0)
goto out;
Index: kvm.pinned-sptes/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm.pinned-sptes.orig/arch/x86/kvm/paging_tmpl.h	2014-06-18 17:27:47.583549234 -0300
+++ kvm.pinned-sptes/arch/x86/kvm/paging_tmpl.h	2014-06-18 17:28:17.550456611 -0300
@@ -687,7 +687,7 @@
* a negative value on error.
*/
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
- bool prefault)
+ bool prefault, bool pin, bool *pinned)
{
int write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK;
Index: kvm.pinned-sptes/arch/x86/kvm/x86.c
===================================================================
--- kvm.pinned-sptes.orig/arch/x86/kvm/x86.c	2014-06-18 17:27:47.586549225 -0300
+++ kvm.pinned-sptes/arch/x86/kvm/x86.c 2014-06-18 17:28:17.552456605 -0300
@@ -7415,7 +7415,7 @@
work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
return;
- vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+ vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true, false, NULL);
}
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html