From 22895ad4778189242dd546c98709bcd3fadb0cf6 Mon Sep 17 00:00:00 2001
From: Sheng Yang <[EMAIL PROTECTED]>
Date: Fri, 1 Feb 2008 06:32:19 +0800
Subject: [PATCH] KVM: MMU: Add Two Level Paging (TLP)

Since EPT's path is very different from NPT's, the return value of
tlp_enabled() indicates whether EPT is in use, so that the proper path can be
set up.

Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
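For reference: the tlp_enabled() callback and the KVM_TLP_EPT value used
below are not defined in this patch; they are expected to come from the
kvm_x86_ops/VMX side of the series. A minimal sketch of what such definitions
could look like follows, purely as an illustration; the constant names
KVM_TLP_NONE/KVM_TLP_NPT, the vmx_tlp_enabled() helper and the ept_available
flag are assumptions, not the actual interface.

/*
 * Illustrative sketch only -- these definitions are not part of this
 * patch and the names are assumptions; the real ones belong to the
 * kvm_x86_ops/VMX patches in the series.
 */
#define KVM_TLP_NONE	0	/* shadow paging only, no TLP hardware */
#define KVM_TLP_EPT	1	/* Intel EPT */
#define KVM_TLP_NPT	2	/* AMD NPT */

/* Assumed hook in struct kvm_x86_ops:  int (*tlp_enabled)(void); */

static int ept_available;	/* stand-in for vmx.c's real capability check */

static int vmx_tlp_enabled(void)
{
	return ept_available ? KVM_TLP_EPT : KVM_TLP_NONE;
}

With something like that in place, init_kvm_mmu() below selects
init_kvm_tlp_mmu() whenever the callback returns a non-zero value, while the
EPT-only branches compare the result against KVM_TLP_EPT.
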
 arch/x86/kvm/mmu.c |   71 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 635e70c..14de6d0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1188,6 +1188,55 @@ static void paging_free(struct kvm_vcpu *vcpu)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
+static int ept_paging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+				u32 error_code)
+{
+	/* No paging fault for EPT */
+	BUG();
+	return -1;
+}
+
+static void ept_paging_free(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+}
+
+static void ept_paging_prefetch_page(struct kvm_vcpu *vcpu,
+				     struct kvm_mmu_page *sp)
+{
+	BUG();
+}
+
+static int init_kvm_tlp_mmu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
+	context->new_cr3 = nonpaging_new_cr3;
+	context->shadow_root_level = 0;
+	context->root_hpa = INVALID_PAGE;
+
+	if (kvm_x86_ops->tlp_enabled() == KVM_TLP_EPT) {
+		context->page_fault = ept_paging_page_fault;
+		context->free = ept_paging_free;
+		context->prefetch_page = ept_paging_prefetch_page;
+	}
+
+	if (!is_paging(vcpu)) {
+		context->gva_to_gpa = nonpaging_gva_to_gpa;
+		context->root_level = 0;
+	} else if (is_long_mode(vcpu)) {
+		context->gva_to_gpa = paging64_gva_to_gpa;
+		context->root_level = PT64_ROOT_LEVEL;
+	} else if (is_pae(vcpu)) {
+		context->gva_to_gpa = paging64_gva_to_gpa;
+		context->root_level = PT32E_ROOT_LEVEL;
+	} else {
+		context->gva_to_gpa = paging32_gva_to_gpa;
+		context->root_level = PT32_ROOT_LEVEL;
+	}
+	return 0;
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
 	struct kvm_mmu *context = &vcpu->arch.mmu;
@@ -1229,7 +1278,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1244,6 +1293,14 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 		return paging32_init_context(vcpu);
 }
 
+static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops->tlp_enabled())
+		return init_kvm_tlp_mmu(vcpu);
+	else
+		return init_kvm_softmmu(vcpu);
+}
+
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
@@ -1264,6 +1321,15 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
 
+	if (kvm_x86_ops->tlp_enabled() == KVM_TLP_EPT) {
+		if (!is_paging(vcpu))
+			vcpu->arch.mmu.root_hpa =
+				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+		else
+			vcpu->arch.mmu.root_hpa = vcpu->arch.cr3;
+		kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+		return 0;
+	}
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		goto out;
@@ -1280,7 +1346,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
 
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
-	mmu_free_roots(vcpu);
+	if (kvm_x86_ops->tlp_enabled() != KVM_TLP_EPT)
+		mmu_free_roots(vcpu);
 }
 
 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
-- 
debian.1.5.3.7.1-dirty
