From: Paul Durrant <[email protected]>

Taking a write lock on a pfncache will be disruptive if the cache is
heavily used (which only requires a read lock). Hence, in the MMU notifier
callback, take read locks on caches to check for a match; only take a
write lock to actually perform an invalidation (after another check).

Signed-off-by: Paul Durrant <[email protected]>
---
Cc: Sean Christopherson <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: David Woodhouse <[email protected]>

v10:
 - New in this version.
---
 virt/kvm/pfncache.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index c2a2d1e145b6..4da16d494f4b 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -29,14 +29,30 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 
        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
-               write_lock_irq(&gpc->lock);
+               read_lock_irq(&gpc->lock);
 
                /* Only a single page so no need to care about length */
                if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                    gpc->uhva >= start && gpc->uhva < end) {
-                       gpc->valid = false;
+                       read_unlock_irq(&gpc->lock);
+
+                       /*
+                        * There is a small window here where the cache could
+                        * be modified, and invalidation would no longer be
+                        * necessary. Hence check again whether invalidation
+                        * is still necessary once the write lock has been
+                        * acquired.
+                        */
+
+                       write_lock_irq(&gpc->lock);
+                       if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+                           gpc->uhva >= start && gpc->uhva < end)
+                               gpc->valid = false;
+                       write_unlock_irq(&gpc->lock);
+                       continue;
                }
-               write_unlock_irq(&gpc->lock);
+
+               read_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);
 }
-- 
2.39.2


Reply via email to