This adds KVM_REQ_RING_SOFT_FULL, which is raised in kvm_dirty_ring_push()
when the dirty ring of the specific VCPU becomes softly full. The VCPU is
then forced to exit to userspace if the request is pending and its dirty
ring is still softly full on its next attempt to enter the guest.

Suggested-by: Marc Zyngier <[email protected]>
Signed-off-by: Gavin Shan <[email protected]>
---
 arch/x86/kvm/x86.c       | 19 ++++++++++---------
 include/linux/kvm_host.h |  1 +
 virt/kvm/dirty_ring.c    |  4 ++++
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43a6a7efc6ec..18a4da71989e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10264,16 +10264,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        bool req_immediate_exit = false;
 
-       /* Forbid vmenter if vcpu dirty ring is soft-full */
-       if (unlikely(vcpu->kvm->dirty_ring_size &&
-                    kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) {
-               vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
-               trace_kvm_dirty_ring_exit(vcpu);
-               r = 0;
-               goto out;
-       }
-
        if (kvm_request_pending(vcpu)) {
+               /* Forbid vmenter if vcpu dirty ring is soft-full */
+               if (kvm_check_request(KVM_REQ_RING_SOFT_FULL, vcpu) &&
+                   kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+                       kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
+                       vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
+                       trace_kvm_dirty_ring_exit(vcpu);
+                       r = 0;
+                       goto out;
+               }
+
                if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
                        r = -EIO;
                        goto out;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f4519d3689e1..53fa3134fee0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -157,6 +157,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_VM_DEAD           (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK           2
 #define KVM_REQ_UNHALT            3
+#define KVM_REQ_RING_SOFT_FULL    4
 #define KVM_REQUEST_ARCH_BASE     8
 
 /*
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index f4c2a6eb1666..f0e49937bc9e 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -149,6 +149,7 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
 
 void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
 {
+       struct kvm_vcpu *vcpu = container_of(ring, struct kvm_vcpu, dirty_ring);
        struct kvm_dirty_gfn *entry;
 
        /* It should never get full */
@@ -166,6 +167,9 @@ void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
        kvm_dirty_gfn_set_dirtied(entry);
        ring->dirty_index++;
        trace_kvm_dirty_ring_push(ring, slot, offset);
+
+       if (kvm_dirty_ring_soft_full(ring))
+               kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
 }
 
 struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
-- 
2.23.0

_______________________________________________
kvmarm mailing list
[email protected]
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to