repository: /home/avi/kvm/linux-2.6
branch: pf-emul-opt
commit a933cbbe7973d6f8ebb8a2728b73f15b1f107f41
Author: Avi Kivity <[EMAIL PROTECTED]>
Date:   Wed Nov 28 12:34:13 2007 +0200

    KVM: MMU: Return type of page fault from page fault handler
    
    We distinguish between the three types of page faults:
    - ordinary shadow faults; we can return to the guest
    - mmio; emulate or die
    - page table write; emulate or unshadow
    
    While the code already handles all three paths correctly, this makes it more
    explicit.
    
    Signed-off-by: Avi Kivity <[EMAIL PROTECTED]>

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index be18620..07e8dbc 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -114,6 +114,11 @@ struct kvm_mmu_page {
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
+/* Return codes for kvm_mmu.page_fault() */
+#define PF_HANDLED     0   /* No further action needed */
+#define PF_MMIO        1   /* Emulate */
+#define PF_PTWRITE     2   /* Emulate or unshadow */
+
 /*
  * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
  * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 9b9d1b6..552d7e6 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -897,7 +897,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
                        was_rmapped = is_rmap_pte(pte);
                        if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) 
{
                                kvm_release_page_clean(page);
-                               return 0;
+                               return PF_HANDLED;
                        }
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table,
@@ -910,7 +910,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
                        else
                                kvm_release_page_clean(page);
 
-                       return 0;
+                       return PF_HANDLED;
                }
 
                if (table[index] == shadow_trap_nonpresent_pte) {
@@ -1039,7 +1039,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 
        if (is_error_page(page)) {
                kvm_release_page_clean(page);
-               return 1;
+               return PF_MMIO;
        }
 
        return nonpaging_map(vcpu, gva & PAGE_MASK, page);
@@ -1367,15 +1367,17 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 {
-       int r;
+       int r, pfr;
        enum emulation_result er;
 
        mutex_lock(&vcpu->kvm->lock);
-       r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
-       if (r < 0)
+       pfr = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+       if (pfr < 0) {
+               r = pfr;
                goto out;
+       }
 
-       if (!r) {
+       if (pfr == PF_HANDLED) {
                r = 1;
                goto out;
        }
@@ -1394,8 +1396,14 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
                ++vcpu->stat.mmio_exits;
                return 0;
        case EMULATE_FAIL:
-               kvm_report_emulation_failure(vcpu, "pagetable");
-               return 1;
+               if (pfr == PF_PTWRITE) {
+                       kvm_report_emulation_failure(vcpu, "pagetable");
+                       kvm_mmu_unprotect_page_virt(vcpu, cr2);
+                       return 1;
+               } else { /* PF_MMIO */
+                       kvm_report_emulation_failure(vcpu, "mmio");
+                       return -EOPNOTSUPP;
+               }
        default:
                BUG();
        }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index b24bc7c..aa9f2c5 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -410,7 +410,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                pgprintk("%s: guest page fault\n", __FUNCTION__);
                inject_page_fault(vcpu, addr, walker.error_code);
                vcpu->last_pt_write_count = 0; /* reset fork detector */
-               return 0;
+               return PF_HANDLED;
        }
 
        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
@@ -425,12 +425,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         * mmio: emulate if accessible, otherwise its a guest fault.
         */
        if (is_io_pte(*shadow_pte))
-               return 1;
+               return PF_MMIO;
 
        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");
 
-       return write_pt;
+       return write_pt ? PF_PTWRITE : PF_HANDLED;
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index c70ac33..495fa73 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1713,8 +1713,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                ++vcpu->stat.insn_emulation;
                if (r)  {
                        ++vcpu->stat.insn_emulation_fail;
-                       if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
-                               return EMULATE_DONE;
                        return EMULATE_FAIL;
                }
        }

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4
_______________________________________________
kvm-commits mailing list
kvm-commits@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-commits

Reply via email to