From: Sean Christopherson <sean.j.christopher...@intel.com>

Employ a 'continue' to reduce the indentation for linking a new shadow
page during __direct_map() in preparation for linking private pages.

Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 732510ecda36..25aafac9b5de 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2953,16 +2953,15 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                        break;
 
                drop_large_spte(vcpu, it.sptep);
-               if (!is_shadow_present_pte(*it.sptep)) {
-                       sp = __kvm_mmu_get_page(vcpu, base_gfn,
-                                               gfn_stolen_bits, it.addr,
-                                               it.level - 1, true, ACC_ALL);
-
-                       link_shadow_page(vcpu, it.sptep, sp);
-                       if (is_tdp && huge_page_disallowed &&
-                           req_level >= it.level)
-                               account_huge_nx_page(vcpu->kvm, sp);
-               }
+               if (is_shadow_present_pte(*it.sptep))
+                       continue;
+
+               sp = __kvm_mmu_get_page(vcpu, base_gfn, gfn_stolen_bits,
+                                       it.addr, it.level - 1, true, ACC_ALL);
+
+               link_shadow_page(vcpu, it.sptep, sp);
+               if (is_tdp && huge_page_disallowed && req_level >= it.level)
+                       account_huge_nx_page(vcpu->kvm, sp);
        }
 
        ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
-- 
2.17.1

Reply via email to