walk_addr_generic() is a hot path and is also hard for the CPU to predict:
some of the parameters (fetch_fault in particular) vary wildly from
invocation to invocation.

Add unlikely() annotations where appropriate; all walk failures are
considered unlikely, as are the cases where we have to set the accessed or
dirty bit, since those are slow paths both in KVM and on real processors.

Signed-off-by: Avi Kivity <[email protected]>
---
 arch/x86/kvm/paging_tmpl.h |   28 +++++++++++++++-------------
 1 files changed, 15 insertions(+), 13 deletions(-)

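For reference (not part of the patch itself): unlikely() is a thin wrapper
around gcc's __builtin_expect(), defined in include/linux/compiler.h roughly
as below; exact definitions vary by kernel version.

	/* Branch-prediction hints: tell gcc which way the branch
	 * is expected to go so it can keep the common path on the
	 * fall-through side. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

This moves the annotated error/slow-path code off the fall-through path,
which also helps static branch predictors on CPUs that have them.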
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a32a1c8..652d56c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -172,49 +172,51 @@ walk:
 
                real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              PFERR_USER_MASK|PFERR_WRITE_MASK);
-               if (real_gfn == UNMAPPED_GVA) {
+               if (unlikely(real_gfn == UNMAPPED_GVA)) {
                        present = false;
                        break;
                }
                real_gfn = gpa_to_gfn(real_gfn);
 
                host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
-               if (kvm_is_error_hva(host_addr)) {
+               if (unlikely(kvm_is_error_hva(host_addr))) {
                        present = false;
                        break;
                }
 
                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-               if (get_user(pte, ptep_user)) {
+               if (unlikely(get_user(pte, ptep_user))) {
                        present = false;
                        break;
                }
 
                trace_kvm_mmu_paging_element(pte, walker->level);
 
-               if (!is_present_gpte(pte)) {
+               if (unlikely(!is_present_gpte(pte))) {
                        present = false;
                        break;
                }
 
-               if (is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level)) {
+               if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
+                                             walker->level))) {
                        rsvd_fault = true;
                        break;
                }
 
-               if (write_fault && !is_writable_pte(pte))
-                       if (user_fault || is_write_protection(vcpu))
-                               eperm = true;
+               if (unlikely(write_fault && !is_writable_pte(pte)
+                            && (user_fault || is_write_protection(vcpu))))
+                       eperm = true;
 
-               if (user_fault && !(pte & PT_USER_MASK))
+               if (unlikely(user_fault && !(pte & PT_USER_MASK)))
                        eperm = true;
 
 #if PTTYPE == 64
-               if (fetch_fault && (pte & PT64_NX_MASK))
+               if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
                        eperm = true;
 #endif
 
-               if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
+               if (!eperm && !rsvd_fault
+                   && unlikely(!(pte & PT_ACCESSED_MASK))) {
                        int ret;
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                                                       sizeof(pte));
@@ -270,10 +272,10 @@ walk:
                --walker->level;
        }
 
-       if (!present || eperm || rsvd_fault)
+       if (unlikely(!present || eperm || rsvd_fault))
                goto error;
 
-       if (write_fault && !is_dirty_gpte(pte)) {
+       if (write_fault && unlikely(!is_dirty_gpte(pte))) {
                int ret;
 
                trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-- 
1.7.4.3
