This patch is a first step toward supporting SMP guests. So far it only
adds locking around the second stage PGD stored in the kvm_arch struct.

WARNING: This code is untested and does not yet support SMP guests.

Signed-off-by: Christoffer Dall <c.d...@virtualopensystems.com>
---
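For illustration, here is a minimal userspace sketch of the serialization
pattern the patch introduces. The names guest_mmu, populate_stage2_entry
and handle_guest_fault are hypothetical, and pthread_mutex_t stands in for
the kernel's struct mutex; the point is only that the pfn is resolved
outside the lock, while the stage-2 table walk and update are serialized,
mirroring the user_mem_abort() / __user_mem_abort() split below.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for struct kvm_arch: the shared table root plus its lock. */
struct guest_mmu {
	pthread_mutex_t pgd_mutex;	/* serializes stage-2 table updates */
	unsigned long *pgd;		/* shared 2nd stage table root */
};

/* Stand-in for __user_mem_abort(): must run under pgd_mutex, since two
 * vcpus faulting in the same region could otherwise both see an empty
 * entry and each allocate a pmd/pte table for it, leaking one. */
static int populate_stage2_entry(struct guest_mmu *mmu, unsigned long ipa,
				 unsigned long pfn)
{
	printf("mapping ipa 0x%lx -> pfn 0x%lx\n", ipa, pfn);
	return 0;
}

/* Stand-in for user_mem_abort(): resolve the pfn outside the lock, then
 * hold the lock only for the table walk and update. */
static int handle_guest_fault(struct guest_mmu *mmu, unsigned long ipa)
{
	unsigned long pfn = ipa >> 12;	/* placeholder for gfn_to_pfn() */
	int ret;

	pthread_mutex_lock(&mmu->pgd_mutex);
	ret = populate_stage2_entry(mmu, ipa, pfn);
	pthread_mutex_unlock(&mmu->pgd_mutex);
	return ret;
}

int main(void)
{
	struct guest_mmu mmu = { .pgd_mutex = PTHREAD_MUTEX_INITIALIZER };

	return handle_guest_fault(&mmu, 0x80000000UL);
}
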
 arch/arm/include/asm/kvm_host.h |   12 ++++++--
 arch/arm/kvm/arm.c              |    1 +
 arch/arm/kvm/mmu.c              |   57 +++++++++++++++++++++++++--------------
 3 files changed, 47 insertions(+), 23 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 7fcc412..555a6f1 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -31,9 +31,15 @@ struct kvm_vcpu;
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 
 struct kvm_arch {
-       u32    vmid;    /* The VMID used for the virt. memory system */
-       pgd_t *pgd;     /* 1-level 2nd stage table */
-       u64    vttbr;   /* VTTBR value associated with above pgd and vmid */
+       /* The VMID used for the virt. memory system */
+       u32    vmid;
+
+       /* 1-level 2nd stage table and lock */
+       struct mutex pgd_mutex;
+       pgd_t *pgd;
+
+       /* VTTBR value associated with above pgd and vmid */
+       u64    vttbr;
 };
 
 #define EXCEPTION_NONE      0
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9c5c38e..14ccc4d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -119,6 +119,7 @@ int kvm_arch_init_vm(struct kvm *kvm)
        ret = kvm_alloc_stage2_pgd(kvm);
        if (ret)
                goto out_fail_alloc;
+       mutex_init(&kvm->arch.pgd_mutex);
 
        pgd_phys = virt_to_phys(kvm->arch.pgd);
        kvm->arch.vttbr = pgd_phys & ((1LLU << 40) - 1) & ~((2 << VTTBR_X) - 1);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 50c9571..baeb8a1 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -177,6 +177,9 @@ out:
  * Allocates the 1st level table only of size defined by PGD2_ORDER (can
  * support either full 40-bit input addresses or limited to 32-bit input
  * addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only happen once.
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
@@ -204,6 +207,9 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
  * and setting the struct pointer to NULL.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * destroyed, which can only be done once.
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
@@ -239,49 +245,38 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
        kvm->arch.pgd = NULL;
 }
 
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                         gfn_t gfn, struct kvm_memory_slot *memslot)
+static int __user_mem_abort(struct kvm *kvm, phys_addr_t addr, pfn_t pfn)
 {
-       pfn_t pfn;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;
 
-       pfn = gfn_to_pfn(vcpu->kvm, gfn);
-
-       if (is_error_pfn(pfn)) {
-               kvm_err(-EFAULT, "Guest gfn %u (0x%08lx) does not have "
-                               "corresponding host mapping",
-                               gfn, gfn << PAGE_SHIFT);
-               return -EFAULT;
-       }
-
        /* Create 2nd stage page table mapping - Level 1 */
-       pgd = vcpu->kvm->arch.pgd + pgd_index(fault_ipa);
-       pud = pud_offset(pgd, fault_ipa);
+       pgd = kvm->arch.pgd + pgd_index(addr);
+       pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
-               pmd = pmd_alloc_one(NULL, fault_ipa);
+               pmd = pmd_alloc_one(NULL, addr);
                if (!pmd) {
                        kvm_err(-ENOMEM, "Cannot allocate 2nd stage pmd");
                        return -ENOMEM;
                }
                pud_populate(NULL, pud, pmd);
-               pmd += pmd_index(fault_ipa);
+               pmd += pmd_index(addr);
        } else
-               pmd = pmd_offset(pud, fault_ipa);
+               pmd = pmd_offset(pud, addr);
 
        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
-               pte = pte_alloc_one_kernel(NULL, fault_ipa);
+               pte = pte_alloc_one_kernel(NULL, addr);
                if (!pte) {
                        kvm_err(-ENOMEM, "Cannot allocate 2nd stage pte");
                        return -ENOMEM;
                }
                pmd_populate_kernel(NULL, pmd, pte);
-               pte += pte_index(fault_ipa);
+               pte += pte_index(addr);
        } else
-               pte = pte_offset_kernel(pmd, fault_ipa);
+               pte = pte_offset_kernel(pmd, addr);
 
        /* Create 2nd stage page table mapping - Level 3 */
        new_pte = pfn_pte(pfn, PAGE_KVM_GUEST);
@@ -290,6 +285,28 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        return 0;
 }
 
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+                         gfn_t gfn, struct kvm_memory_slot *memslot)
+{
+       pfn_t pfn;
+       int ret;
+
+       pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+       if (is_error_pfn(pfn)) {
+               kvm_err(-EFAULT, "Guest gfn %u (0x%08lx) does not have "
+                               "corresponding host mapping",
+                               gfn, gfn << PAGE_SHIFT);
+               return -EFAULT;
+       }
+
+       mutex_lock(&vcpu->kvm->arch.pgd_mutex);
+       ret = __user_mem_abort(vcpu->kvm, fault_ipa, pfn);
+       mutex_unlock(&vcpu->kvm->arch.pgd_mutex);
+
+       return ret;
+}
+
 /**
  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
  * @vcpu: The VCPU pointer

--