Invalidate gfn_to_pfn_caches when the memory attributes of the gfns they
contain change.

Since gfn_to_pfn_caches are not hooked up to KVM's MMU notifiers, but
rather have to be invalidated right _before_ KVM's MMU notifiers are
triggered, adopt the approach used by
kvm_mmu_notifier_invalidate_range_start for invalidating gpcs inside
kvm_vm_set_mem_attributes.

Signed-off-by: Patrick Roy <[email protected]>
---
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      |  5 +++++
 virt/kvm/kvm_mm.h        | 10 +++++++++
 virt/kvm/pfncache.c      | 45 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 61 insertions(+)
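
A rough sketch of the ordering this relies on (illustration only, not part
of the patch; the reader side is a simplified version of what
hva_to_pfn_retry() in pfncache.c already does):

  writer side (kvm_vm_set_mem_attributes(), slots_lock held):

	kvm->attribute_change_in_progress = true;
	gfn_to_pfn_cache_invalidate_gfns_start(kvm, start, end);
	kvm_handle_gfn_range(kvm, &pre_set_range);
	/* ... mem_attr_array updated ... */
	kvm_handle_gfn_range(kvm, &post_set_range); /* bumps mmu_invalidate_seq */
	kvm->attribute_change_in_progress = false;

  reader side (gpc refresh):

	do {
		mmu_seq = kvm->mmu_invalidate_seq;
		smp_rmb();
		new_pfn = hva_to_pfn(...);
		/*
		 * Retry while attribute_change_in_progress is set or
		 * mmu_invalidate_seq has changed, so a stale pfn is never
		 * installed concurrently with an attribute update.
		 */
	} while (mmu_notifier_retry_cache(kvm, mmu_seq));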

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cd28eb34aaeb1..7d36164a2cee5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -840,6 +840,7 @@ struct kvm {
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
        /* Protected by slots_locks (for writes) and RCU (for reads) */
        struct xarray mem_attr_array;
+       bool attribute_change_in_progress;
 #endif
        char stats_id[KVM_STATS_NAME_SIZE];
 };
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 13347fb03d4a9..183f7ce57a428 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2533,6 +2533,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
 
        mutex_lock(&kvm->slots_lock);
 
+
        /* Nothing to do if the entire range as the desired attributes. */
        if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
                goto out_unlock;
@@ -2547,6 +2548,9 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
                        goto out_unlock;
        }
 
+       kvm->attribute_change_in_progress = true;
+       gfn_to_pfn_cache_invalidate_gfns_start(kvm, start, end);
+
        kvm_handle_gfn_range(kvm, &pre_set_range);
 
        for (i = start; i < end; i++) {
@@ -2558,6 +2562,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
        kvm_handle_gfn_range(kvm, &post_set_range);
 
 out_unlock:
+       kvm->attribute_change_in_progress = false;
        mutex_unlock(&kvm->slots_lock);
 
        return r;
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index 715f19669d01f..5a53d888e4b18 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -27,12 +27,22 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
                                       unsigned long start,
                                       unsigned long end);
+
+void gfn_to_pfn_cache_invalidate_gfns_start(struct kvm *kvm,
+                                           gfn_t start,
+                                           gfn_t end);
 #else
 static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
                                                     unsigned long start,
                                                     unsigned long end)
 {
 }
+
+static inline void gfn_to_pfn_cache_invalidate_gfns_start(struct kvm *kvm,
+                                                         gfn_t start,
+                                                         gfn_t end)
+{
+}
 #endif /* HAVE_KVM_PFNCACHE */
 
 #ifdef CONFIG_KVM_PRIVATE_MEM
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index f0039efb9e1e3..6de934a8a153f 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -57,6 +57,43 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
        spin_unlock(&kvm->gpc_lock);
 }
 
+/*
+ * Identical to `gfn_to_pfn_cache_invalidate_start`, except based on gfns
+ * instead of uhvas.
+ */
+void gfn_to_pfn_cache_invalidate_gfns_start(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+       struct gfn_to_pfn_cache *gpc;
+
+       spin_lock(&kvm->gpc_lock);
+       list_for_each_entry(gpc, &kvm->gpc_list, list) {
+               read_lock_irq(&gpc->lock);
+
+               /*
+                * uhva-based gpcs must not be used with gmem-enabled memslots
+                */
+               if (kvm_is_error_gpa(gpc->gpa)) {
+                       read_unlock_irq(&gpc->lock);
+                       continue;
+               }
+
+               if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+                   gpa_to_gfn(gpc->gpa) >= start && gpa_to_gfn(gpc->gpa) < end) {
+                       read_unlock_irq(&gpc->lock);
+
+                       write_lock_irq(&gpc->lock);
+                       if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+                           gpa_to_gfn(gpc->gpa) >= start && gpa_to_gfn(gpc->gpa) < end)
+                               gpc->valid = false;
+                       write_unlock_irq(&gpc->lock);
+                       continue;
+               }
+
+               read_unlock_irq(&gpc->lock);
+       }
+       spin_unlock(&kvm->gpc_lock);
+}
+
 static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
                                 unsigned long len)
 {
@@ -141,6 +178,14 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
        if (kvm->mn_active_invalidate_count)
                return true;
 
+       /*
+        * Similar to the above, attribute_change_in_progress is set in
+        * kvm_vm_set_mem_attributes before
+        * gfn_to_pfn_cache_invalidate_gfns_start is called, and isn't
+        * cleared until after mmu_invalidate_seq is updated.
+        */
+       if (kvm->attribute_change_in_progress)
+               return true;
        /*
         * Ensure mn_active_invalidate_count is read before
         * mmu_invalidate_seq.  This pairs with the smp_wmb() in
-- 
2.46.0

