This renames user_mem_abort() to kvm_handle_user_mem_abort() and
exports it. The function will be used by the asynchronous page fault
code to populate a page table entry once the corresponding page has
been brought in from the backing device (e.g. a swap partition). Its
signature changes as follows:

   * Parameter @fault_status is replaced by @esr.
   * Parameter @prefault is added.

Since @esr is now passed in as a parameter instead of being fetched
from the vCPU struct, this also introduces the necessary helpers in
esr.h to extract fields from @esr. The corresponding helpers in
kvm_emulate.h are reworked to reuse the newly added ones. This
shouldn't cause any functional changes.

Signed-off-by: Gavin Shan <[email protected]>
---
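For reference, a minimal standalone sketch (userspace, not kernel code)
of what the new FIELD_GET-based helpers in esr.h compute. The mask
values mirror ESR_ELx_EC_MASK, ESR_ELx_WNR and ESR_ELx_S1PTW, and the
FIELD_GET macro below is a simplified stand-in for the one in
<linux/bitfield.h>:

	#include <stdio.h>
	#include <stdint.h>

	#define EC_MASK	(0x3Full << 26)	/* mirrors ESR_ELx_EC_MASK */
	#define WNR	(1ull << 6)	/* mirrors ESR_ELx_WNR */
	#define S1PTW	(1ull << 7)	/* mirrors ESR_ELx_S1PTW */

	/* Simplified FIELD_GET(): mask the field, shift it down to bit 0. */
	#define FIELD_GET(mask, val)	(((val) & (mask)) / ((mask) & -(mask)))

	int main(void)
	{
		/* EC = 0x24 (data abort from lower EL), write fault. */
		uint64_t esr = (0x24ull << 26) | WNR;

		printf("class=0x%02llx wnr=%llu s1ptw=%llu\n",
		       (unsigned long long)FIELD_GET(EC_MASK, esr),
		       (unsigned long long)FIELD_GET(WNR, esr),
		       (unsigned long long)FIELD_GET(S1PTW, esr));
		return 0;
	}

This prints "class=0x24 wnr=1 s1ptw=0"; on a real ESR value the kernel
helpers behave the same way, with esr_get_trap_class() returning the EC
field and the two predicates folding single-bit fields down to a bool.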
 arch/arm64/include/asm/esr.h         |  5 +++++
 arch/arm64/include/asm/kvm_emulate.h |  8 ++++----
 arch/arm64/include/asm/kvm_host.h    |  4 ++++
 arch/arm64/kvm/mmu.c                 | 18 ++++++++++++------
 4 files changed, 25 insertions(+), 10 deletions(-)
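
The diff below only converts the existing synchronous caller (which
passes prefault == false). A hypothetical async-PF completion path,
which is what the export is intended for, might look roughly like the
sketch below; apf_work and its fields are made up for illustration and
are not added by this patch:

	/*
	 * Illustrative only: replay a recorded stage-2 fault once the
	 * page has been brought in from the backing store. apf_work is
	 * a hypothetical bookkeeping struct, not part of this patch.
	 */
	static void kvm_async_pf_replay(struct kvm_vcpu *vcpu,
					struct apf_work *work)
	{
		/*
		 * @prefault == true: the vCPU is no longer blocked on
		 * this fault, so only the stage-2 mapping needs to be
		 * (re)populated from the recorded @esr.
		 */
		kvm_handle_user_mem_abort(vcpu, work->fault_ipa,
					  work->memslot, work->hva,
					  work->esr, true);
	}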

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..ec0b5d81183c 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -317,8 +317,13 @@
                                         ESR_ELx_CP15_32_ISS_DIR_READ)
 
 #ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
 #include <asm/types.h>
 
+#define esr_get_trap_class(esr)                FIELD_GET(ESR_ELx_EC_MASK, esr)
+#define esr_is_dabt_wnr(esr)           !!(FIELD_GET(ESR_ELx_WNR, esr))
+#define esr_is_dabt_iss1tw(esr)                !!(FIELD_GET(ESR_ELx_S1PTW, esr))
+
 static inline bool esr_is_data_abort(u32 esr)
 {
        const u32 ec = ESR_ELx_EC(esr);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index bb7aee5927a5..2681d1fe4003 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -323,13 +323,13 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+       return esr_is_dabt_iss1tw(kvm_vcpu_get_hsr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
-               kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+       return (esr_is_dabt_wnr(kvm_vcpu_get_hsr(vcpu)) ||
+               kvm_vcpu_dabt_iss1tw(vcpu)); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -350,7 +350,7 @@ static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-       return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+       return esr_get_trap_class(kvm_vcpu_get_hsr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ba8cdc304b81..b6c9851b2a65 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -441,6 +441,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_handle_user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+                             struct kvm_memory_slot *memslot,
+                             unsigned long hva, unsigned int esr,
+                             bool prefault);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7a7ddc4558a7..b23778392aa1 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1787,11 +1787,15 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
        return PAGE_SIZE;
 }
 
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                         struct kvm_memory_slot *memslot, unsigned long hva,
-                         unsigned long fault_status)
+int kvm_handle_user_mem_abort(struct kvm_vcpu *vcpu,
+                             phys_addr_t fault_ipa,
+                             struct kvm_memory_slot *memslot,
+                             unsigned long hva,
+                             unsigned int esr,
+                             bool prefault)
 {
        int ret;
+       unsigned int fault_status = (esr & ESR_ELx_FSC_TYPE);
        bool write_fault, writable, force_pte = false;
        bool exec_fault, needs_exec;
        unsigned long mmu_seq;
@@ -1805,8 +1809,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        bool logging_active = memslot_is_logging(memslot);
        unsigned long vma_pagesize, flags = 0;
 
-       write_fault = kvm_is_write_fault(vcpu);
-       exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+       exec_fault = (esr_get_trap_class(esr) == ESR_ELx_EC_IABT_LOW);
+       write_fault = (!exec_fault &&
+                      (esr_is_dabt_wnr(esr) || esr_is_dabt_iss1tw(esr)));
        VM_BUG_ON(write_fault && exec_fault);
 
        if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -2116,7 +2121,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                goto out_unlock;
        }
 
-       ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
+       ret = kvm_handle_user_mem_abort(vcpu, fault_ipa, memslot, hva,
+                                       kvm_vcpu_get_hsr(vcpu), false);
        if (ret == 0)
                ret = 1;
 out:
-- 
2.23.0
