When the guest accesses I/O memory this will create data abort
exceptions and they are handled by decoding the HSR information
(physical address, read/write, length, register) and forwarding reads
and writes to QEMU which performs the device emulation.

This requires changing the general flow somewhat since new calls to run
the VCPU must check if there's a pending MMIO load and complete the
register write-back once QEMU has made the data available.
---
 arch/arm/include/asm/kvm_host.h |    1 
 arch/arm/include/asm/kvm_mmu.h  |    1 
 arch/arm/kvm/arm.c              |   11 ++++
 arch/arm/kvm/arm_mmu.c          |  106 ++++++++++++++++++++++++++++++++++++++-
 4 files changed, 115 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 7f96974..5393e25 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -86,6 +86,7 @@ struct kvm_vcpu_arch {
        u32 hpfar;              /* Hyp IPA Fault Address Register */
 
        /* IO related fields */
+       bool mmio_sign_extend;  /* for byte/halfword loads */
        u32 mmio_rd;
 
        /* Misc. fields */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a64ab2d..f06f42d 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -40,6 +40,7 @@ void free_hyp_pmds(pgd_t *hyp_pgd);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index abed683..d01f234 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -349,6 +349,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct 
kvm_run *run)
        int ret;
 
        for (;;) {
+               if (run->exit_reason == KVM_EXIT_MMIO) {
+                       ret = kvm_handle_mmio_return(vcpu, vcpu->run);
+                       if (ret)
+                               break;
+               }
+
                local_irq_save(flags);
                ret = __kvm_vcpu_run(vcpu);
                local_irq_restore(flags);
@@ -367,8 +373,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct 
kvm_run *run)
                        kvm_err(ret, "Error in handle_exit");
                        break;
                }
+
+               if (run->exit_reason == KVM_EXIT_MMIO)
+                       break;
        }
 
+       if (ret < 0)
+               run->exit_reason = KVM_EXIT_EXCEPTION;
        return ret;
 }
 
diff --git a/arch/arm/kvm/arm_mmu.c b/arch/arm/kvm/arm_mmu.c
index fe27e59..b04a211 100644
--- a/arch/arm/kvm/arm_mmu.c
+++ b/arch/arm/kvm/arm_mmu.c
@@ -16,9 +16,10 @@
 
 #include <linux/mman.h>
 #include <linux/kvm_host.h>
+#include <asm/pgalloc.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
-#include <asm/pgalloc.h>
+#include <asm/kvm_emulate.h>
 
 #include "../mm/mm.h"
 #include "trace.h"
@@ -297,6 +298,105 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, 
phys_addr_t fault_ipa,
        return 0;
 }
 
+/**
+ * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ *
+ * This should only be called after returning from userspace for MMIO load
+ * emulation.  It copies the data QEMU placed in run->mmio.data into the
+ * destination register recorded at abort time (vcpu->arch.mmio_rd),
+ * sign-extending byte/halfword loads when the trapped instruction
+ * requested it.  MMIO stores need no completion work here.
+ *
+ * Returns 0 on success or -EINVAL on a malformed mmio length.
+ */
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 *dest;
+	unsigned int len;
+	u32 mask;
+
+	if (run->mmio.is_write)
+		return 0;
+
+	/*
+	 * Validate the length BEFORE touching any guest register state,
+	 * so a malformed run struct cannot clobber the destination
+	 * register on the error path.
+	 */
+	len = run->mmio.len;
+	if (len == 0 || len > sizeof(u32)) {
+		kvm_err(-EINVAL, "Incorrect mmio length");
+		return -EINVAL;
+	}
+
+	dest = &vcpu_reg(vcpu, vcpu->arch.mmio_rd);
+	*dest = 0;
+	memcpy(dest, run->mmio.data, len);
+
+	if (vcpu->arch.mmio_sign_extend && len < sizeof(u32)) {
+		/* Sign-extend: XOR with the sign bit, then subtract it. */
+		mask = 1U << ((len * 8) - 1);
+		*dest = (*dest ^ mask) - mask;
+	}
+
+	return 0;
+}
+
+static int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			phys_addr_t fault_ipa, struct kvm_memory_slot *memslot)
+{
+	unsigned long rd, len, instr_len;
+	bool is_write, sign_extend;
+	u32 hsr = vcpu->arch.hsr;
+
+	/*
+	 * Decode the data-abort syndrome from the HSR.  Bit positions used
+	 * below (presumably the ISS data-abort layout — confirm against the
+	 * ARM ARM):
+	 *   bit 25: instruction was 32-bit (vs 16-bit Thumb)
+	 *   bit 24: syndrome information is valid
+	 *   bits 23:22: access size (0=byte, 1=halfword, 2=word)
+	 *   bit 21: sign-extend byte/halfword loads
+	 *   bits 19:16: source/destination register
+	 *   bit 7:  abort taken on a translation-table walk
+	 *   bit 6:  access was a write
+	 */
+	if (!((hsr >> 24) & 1) || ((hsr >> 8) & 1)) {
+		kvm_err(-EFAULT, "Invalid I/O abort");
+		return -EFAULT;
+	}
+
+	if ((hsr >> 7) & 1) {
+		kvm_err(-EFAULT, "Translation table accesses I/O memory");
+		return -EFAULT;
+	}
+
+	switch ((hsr >> 22) & 0x3) {
+	case 0: len = 1; break;
+	case 1: len = 2; break;
+	case 2: len = 4; break;
+	default:
+		kvm_err(-EFAULT, "Invalid I/O abort");
+		return -EFAULT;
+	}
+
+	is_write = ((hsr >> 6) & 1);
+	sign_extend = ((hsr >> 21) & 1);
+	rd = (hsr >> 16) & 0xf;	/* mask guarantees rd <= 15 */
+
+	if (rd == 15) {
+		kvm_err(-EFAULT, "I/O memory trying to read/write pc");
+		return -EFAULT;
+	}
+
+	/* Get instruction length in bytes */
+	instr_len = ((hsr >> 25) & 1) ? 4 : 2;
+
+	if (!memslot) {
+		/* QEMU hack for missing devices - simply return 0 */
+		if (!is_write)
+			vcpu_reg(vcpu, rd) = 0;
+		vcpu_reg(vcpu, 15) += instr_len;
+		return 0;
+	}
+
+	/*
+	 * Export MMIO operations to user space.  Use the run parameter
+	 * consistently (it is the same object as vcpu->run here).
+	 */
+	run->exit_reason = KVM_EXIT_MMIO;
+	run->mmio.is_write = is_write;
+	run->mmio.phys_addr = fault_ipa;
+	run->mmio.len = len;
+	vcpu->arch.mmio_sign_extend = sign_extend;
+	vcpu->arch.mmio_rd = rd;
+
+	if (is_write)
+		memcpy(run->mmio.data, &vcpu_reg(vcpu, rd), len);
+
+	/*
+	 * The MMIO instruction is emulated and should not be re-executed
+	 * in the guest.
+	 */
+	vcpu_reg(vcpu, 15) += instr_len;
+	return 0;
+}
+
 #define HSR_ABT_FS     (0x3f)
 #define HPFAR_MASK     (~0xf)
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -335,7 +435,5 @@ io_mem_abort:
                return -EFAULT;
        }
 
-       kvm_msg("I/O address abort...");
-       KVMARM_NOT_IMPLEMENTED();
-       return -EINVAL;
+       return io_mem_abort(vcpu, run, fault_ipa, memslot);
 }

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to