Move the definition of "struct kvm_async_pf" and the related function
declarations after "struct kvm_vcpu", so that the inline functions added
by subsequent patches can dereference "struct kvm_vcpu" properly.
Otherwise, the following build error is raised:

   error: dereferencing pointer to incomplete type ‘struct kvm_vcpu’
   return !list_empty_careful(&vcpu->async_pf.done);
                                   ^~
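
For reference, the following is a minimal sketch of the kind of inline
helper the subsequent patches add (the helper name is hypothetical, not
taken from this series); it can only live after the full definition of
"struct kvm_vcpu":

   static inline bool kvm_async_pf_has_completions(struct kvm_vcpu *vcpu)
   {
           /* needs the complete type to reach vcpu->async_pf.done */
           return !list_empty_careful(&vcpu->async_pf.done);
   }
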
While at it, the separator between type and field in "struct kvm_async_pf"
is replaced with a tab. An empty stub of kvm_check_async_pf_completion() is
also added for !CONFIG_KVM_ASYNC_PF, which is needed by subsequent patches
to support asynchronous page fault on ARM64.
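
As an illustration only (the call site below is a hypothetical sketch, not
part of this patch), the stub lets such a caller avoid wrapping the call
in #ifdef CONFIG_KVM_ASYNC_PF:

   static void vcpu_handle_pending_work(struct kvm_vcpu *vcpu)
   {
           /* resolves to the empty stub when CONFIG_KVM_ASYNC_PF is off */
           kvm_check_async_pf_completion(vcpu);
   }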

Signed-off-by: Gavin Shan <[email protected]>
---
 include/linux/kvm_host.h | 44 +++++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ae7735b490b4..85b61a456f1c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -199,27 +199,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);
 
-#ifdef CONFIG_KVM_ASYNC_PF
-struct kvm_async_pf {
-       struct work_struct work;
-       struct list_head link;
-       struct list_head queue;
-       struct kvm_vcpu *vcpu;
-       struct mm_struct *mm;
-       gpa_t cr2_or_gpa;
-       unsigned long addr;
-       struct kvm_arch_async_pf arch;
-       bool   wakeup_all;
-       bool notpresent_injected;
-};
-
-void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
-void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                       unsigned long hva, struct kvm_arch_async_pf *arch);
-int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
-#endif
-
 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
 struct kvm_gfn_range {
        struct kvm_memory_slot *slot;
@@ -346,6 +325,29 @@ struct kvm_vcpu {
        struct kvm_dirty_ring dirty_ring;
 };
 
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+       struct work_struct              work;
+       struct list_head                link;
+       struct list_head                queue;
+       struct kvm_vcpu                 *vcpu;
+       struct mm_struct                *mm;
+       gpa_t                           cr2_or_gpa;
+       unsigned long                   addr;
+       struct kvm_arch_async_pf        arch;
+       bool                            wakeup_all;
+       bool                            notpresent_injected;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                       unsigned long hva, struct kvm_arch_async_pf *arch);
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) { }
+#endif
+
 /* must be called with irqs disabled */
 static __always_inline void guest_enter_irqoff(void)
 {
-- 
2.23.0
