Initializes a blank level-1 translation table for the second-stage
translation and handles freeing it as well.
---
arch/arm/include/asm/kvm_host.h | 4 ++-
arch/arm/include/asm/kvm_mmu.h | 5 ++++
arch/arm/kvm/arm.c | 54 ++++++++++++++++++++++++++++++++++++++-
3 files changed, 61 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 9fa9b20..5955ff4 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -31,7 +31,9 @@ struct kvm_vcpu;
u32* kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
struct kvm_arch {
- pgd_t *pgd; /* 1-level 2nd stage table */
+ u32 vmid; /* The VMID used for the virt. memory system */
+ pgd_t *pgd; /* 1-level 2nd stage table */
+ u64 vttbr; /* VTTBR value associated with above pgd and vmid */
};
#define EXCEPTION_NONE 0
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index d22aad0..a64ab2d 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -37,4 +37,9 @@ void remove_hyp_mappings(pgd_t *hyp_pgd, unsigned long end);
void free_hyp_pmds(pgd_t *hyp_pgd);
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 4f691be..714f415 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -77,13 +77,56 @@ void kvm_arch_sync_events(struct kvm *kvm)
int kvm_arch_init_vm(struct kvm *kvm)
{
- return 0;
+ int ret = 0;
+ phys_addr_t pgd_phys;
+ unsigned long vmid;
+ unsigned long start, end;
+
+
+ mutex_lock(&kvm_vmids_mutex);
+ vmid = find_first_zero_bit(kvm_vmids, VMID_SIZE);
+ if (vmid >= VMID_SIZE) {
+ mutex_unlock(&kvm_vmids_mutex);
+ return -EBUSY;
+ }
+ __set_bit(vmid, kvm_vmids);
+ kvm->arch.vmid = vmid;
+ mutex_unlock(&kvm_vmids_mutex);
+
+ ret = kvm_alloc_stage2_pgd(kvm);
+ if (ret)
+ goto out_fail_alloc;
+
+ pgd_phys = virt_to_phys(kvm->arch.pgd);
+ kvm->arch.vttbr = (pgd_phys & ((1LLU << 40) - 1) & ~((2 << VTTBR_X) - 1)) |
+ ((u64)vmid << 48);
+
+ start = (unsigned long)kvm;
+ end = start + sizeof(struct kvm);
+ ret = create_hyp_mappings(kvm_hyp_pgd, start, end);
+ if (ret)
+ goto out_fail_hyp_mappings;
+
+ return ret;
+out_fail_hyp_mappings:
+ remove_hyp_mappings(kvm_hyp_pgd, start, end);
+out_fail_alloc:
+ clear_bit(vmid, kvm_vmids);
+ return ret;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
+ kvm_free_stage2_pgd(kvm);
+
+ if (kvm->arch.vmid != 0) {
+ mutex_lock(&kvm_vmids_mutex);
+ clear_bit(kvm->arch.vmid, kvm_vmids);
+ mutex_unlock(&kvm_vmids_mutex);
+ }
+
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -158,6 +201,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
int err;
struct kvm_vcpu *vcpu;
+ unsigned long start, end;
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
@@ -169,7 +213,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;
+ start = (unsigned long)vcpu;
+ end = start + sizeof(struct kvm_vcpu);
+ err = create_hyp_mappings(kvm_hyp_pgd, start, end);
+ if (err)
+ goto out_fail_hyp_mappings;
+
return vcpu;
+out_fail_hyp_mappings:
+ remove_hyp_mappings(kvm_hyp_pgd, start, end);
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html