Return early from mmu_set_spte() if the new SPTE is not-present so as to
reduce the indentation of the code that performs metadata updates, e.g.
rmap manipulation.  Additional metadata updates will soon follow...
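
For readers skimming the diff below, a minimal stand-alone sketch of the
early-return pattern being applied (hypothetical names, not KVM code):
instead of nesting the metadata updates under a present-check, the function
bails out as soon as there is nothing to track, so follow-up patches can
append further updates without deepening the indentation.

#include <stdbool.h>
#include <stdio.h>

struct entry { bool present; int refcount; };

/* Hypothetical stand-in for the tail of mmu_set_spte(): update tracking
 * metadata only when the entry is present.
 */
static int update_metadata(struct entry *e, bool was_tracked)
{
	int ret = 0;

	/* No additional tracking necessary for a not-present entry. */
	if (!e->present)
		return ret;

	/* Metadata updates stay at a single indentation level... */
	if (!was_tracked)
		e->refcount++;

	/* ...so additional updates can be appended here later. */
	return ret;
}

int main(void)
{
	struct entry e = { .present = true, .refcount = 0 };

	update_metadata(&e, false);
	printf("refcount = %d\n", e.refcount);
	return 0;
}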

Signed-off-by: Sean Christopherson <[email protected]>
---
 arch/x86/kvm/mmu/mmu.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 82f69a7456004..182f398036248 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3126,12 +3126,14 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        if (!was_rmapped && is_large_pte(*sptep))
                ++vcpu->kvm->stat.lpages;
 
-       if (is_shadow_present_pte(*sptep)) {
-               if (!was_rmapped) {
-                       rmap_count = rmap_add(vcpu, sptep, gfn);
-                       if (rmap_count > RMAP_RECYCLE_THRESHOLD)
-                               rmap_recycle(vcpu, sptep, gfn);
-               }
+       /* No additional tracking necessary for not-present SPTEs. */
+       if (!is_shadow_present_pte(*sptep))
+               return ret;
+
+       if (!was_rmapped) {
+               rmap_count = rmap_add(vcpu, sptep, gfn);
+               if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+                       rmap_recycle(vcpu, sptep, gfn);
        }
 
        return ret;
-- 
2.28.0
