Once manual dirty log reprotect is enabled, kvm_get_dirty_log_protect's
pointer argument will always be false on exit, because no TLB flush is needed
until the manual re-protection operation.  Rename it from "is_dirty" to "flush",
which more accurately tells the caller what to do with it.
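
For reference, the caller pattern implied by the renamed out-parameter is
sketched below.  This is a minimal, illustrative example that simply mirrors
the arm/x86 callers touched by this patch; it is not new code being added:

	bool flush = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	/* kvm_get_dirty_log_protect() sets "flush" when it write-protected
	 * at least one page, i.e. when the caller must flush the TLBs.
	 */
	r = kvm_get_dirty_log_protect(kvm, log, &flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);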

Signed-off-by: Paolo Bonzini <[email protected]>
---
 arch/mips/kvm/mips.c     | 6 +++---
 arch/x86/kvm/x86.c       | 6 +++---
 include/linux/kvm_host.h | 2 +-
 virt/kvm/arm/arm.c       | 6 +++---
 virt/kvm/kvm_main.c      | 6 +++---
 5 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 1fcc4d149054..3898e657952e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1004,14 +1004,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
-       bool is_dirty = false;
+       bool flush = false;
        int r;
 
        mutex_lock(&kvm->slots_lock);
 
-       r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+       r = kvm_get_dirty_log_protect(kvm, log, &flush);
 
-       if (is_dirty) {
+       if (flush) {
                slots = kvm_memslots(kvm);
                memslot = id_to_memslot(slots, log->slot);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dc902e5139c2..ff6a8411a15c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4410,7 +4410,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-       bool is_dirty = false;
+       bool flush = false;
        int r;
 
        mutex_lock(&kvm->slots_lock);
@@ -4421,14 +4421,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
        if (kvm_x86_ops->flush_log_dirty)
                kvm_x86_ops->flush_log_dirty(kvm);
 
-       r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+       r = kvm_get_dirty_log_protect(kvm, log, &flush);
 
        /*
         * All the TLBs can be flushed out of mmu lock, see the comments in
         * kvm_mmu_slot_remove_write_access().
         */
        lockdep_assert_held(&kvm->slots_lock);
-       if (is_dirty)
+       if (flush)
                kvm_flush_remote_tlbs(kvm);
 
        mutex_unlock(&kvm->slots_lock);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 54cc06dd7e6c..8c56b2873b13 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -753,7 +753,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
 
 int kvm_get_dirty_log_protect(struct kvm *kvm,
-                       struct kvm_dirty_log *log, bool *is_dirty);
+                             struct kvm_dirty_log *log, bool *flush);
 
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                        struct kvm_memory_slot *slot,
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 23774970c9df..120a2663dab9 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1205,14 +1205,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-       bool is_dirty = false;
+       bool flush = false;
        int r;
 
        mutex_lock(&kvm->slots_lock);
 
-       r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+       r = kvm_get_dirty_log_protect(kvm, log, &flush);
 
-       if (is_dirty)
+       if (flush)
                kvm_flush_remote_tlbs(kvm);
 
        mutex_unlock(&kvm->slots_lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2d842fd20cd3..b1eae5394411 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1154,7 +1154,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
  *
  */
 int kvm_get_dirty_log_protect(struct kvm *kvm,
-                       struct kvm_dirty_log *log, bool *is_dirty)
+                       struct kvm_dirty_log *log, bool *flush)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
@@ -1181,7 +1181,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
        memset(dirty_bitmap_buffer, 0, n);
 
        spin_lock(&kvm->mmu_lock);
-       *is_dirty = false;
+       *flush = false;
        for (i = 0; i < n / sizeof(long); i++) {
                unsigned long mask;
                gfn_t offset;
@@ -1189,7 +1189,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
                if (!dirty_bitmap[i])
                        continue;
 
-               *is_dirty = true;
+               *flush = true;
 
                mask = xchg(&dirty_bitmap[i], 0);
                dirty_bitmap_buffer[i] = mask;
-- 
1.8.3.1

