Pass the memslot to kvm_iommu_map_pages() instead of a (base_gfn, npages)
pair, so it is possible to IOMMU-map a memslot before making it visible
to KVM.
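
For illustration only, a caller could take advantage of the new signature
along these lines (a rough sketch; example_prepare_slot() and the details
of slot installation and locking are assumptions, not part of this patch):

    #include <linux/kvm_host.h>

    /*
     * Hypothetical helper: build the IOMMU mapping while the slot is
     * still private to this path, and only then publish it in
     * kvm->memslots.
     */
    static int example_prepare_slot(struct kvm *kvm,
                                    struct kvm_memory_slot *new, int id)
    {
    #ifdef CONFIG_DMAR
            /* Map the pages in the IOMMU page table before the slot
             * is reachable through kvm->memslots. */
            int r = kvm_iommu_map_pages(kvm, new);
            if (r)
                    return r;
    #endif
            /* Now make the slot visible to the rest of KVM. */
            spin_lock(&kvm->mmu_lock);
            kvm->memslots->memslots[id] = *new;
            spin_unlock(&kvm->mmu_lock);
            return 0;
    }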

Signed-off-by: Marcelo Tosatti <mtosa...@redhat.com>

Index: kvm/include/linux/kvm_host.h
===================================================================
--- kvm.orig/include/linux/kvm_host.h
+++ kvm/include/linux/kvm_host.h
@@ -439,8 +439,7 @@ void kvm_free_irq_source_id(struct kvm *
 #define KVM_IOMMU_CACHE_COHERENCY      0x1
 
 #ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
-                       unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
Index: kvm/virt/kvm/iommu.c
===================================================================
--- kvm.orig/virt/kvm/iommu.c
+++ kvm/virt/kvm/iommu.c
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(stru
 static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);
 
-int kvm_iommu_map_pages(struct kvm *kvm,
-                       gfn_t base_gfn, unsigned long npages)
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-       gfn_t gfn = base_gfn;
+       gfn_t gfn = slot->base_gfn;
+       unsigned long npages = slot->npages;
        pfn_t pfn;
        int i, r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
                        continue;
 
-               pfn = gfn_to_pfn(kvm, gfn);
+               pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
                r = iommu_map_range(domain,
                                    gfn_to_gpa(gfn),
                                    pfn_to_hpa(pfn),
@@ -69,7 +69,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
        return 0;
 
 unmap_pages:
-       kvm_iommu_put_pages(kvm, base_gfn, i);
+       kvm_iommu_put_pages(kvm, slot->base_gfn, i);
        return r;
 }
 
@@ -81,8 +81,7 @@ static int kvm_iommu_map_memslots(struct
        slots = kvm->memslots;
 
        for (i = 0; i < slots->nmemslots; i++) {
-               r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
-                                       slots->memslots[i].npages);
+               r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
                if (r)
                        break;
        }
Index: kvm/virt/kvm/kvm_main.c
===================================================================
--- kvm.orig/virt/kvm/kvm_main.c
+++ kvm/virt/kvm/kvm_main.c
@@ -684,7 +684,7 @@ skip_lpage:
        spin_unlock(&kvm->mmu_lock);
 #ifdef CONFIG_DMAR
        /* map the pages in iommu page table */
-       r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+       r = kvm_iommu_map_pages(kvm, memslot);
        if (r)
                goto out;
 #endif

