From: David Woodhouse <[email protected]>

If we fix a few highmem-related thinkos and a couple of printk format
warnings, the Intel IOMMU driver works fine in a 32-bit kernel.

--
Fixed end address roundup problem in dma_pte_clear_range().

Tested both 32-bit and 32-bit PAE modes on Intel X58 and Q35 platforms.

Signed-off-by: Yu Zhao <[email protected]>
---
 arch/x86/Kconfig          |    2 +-
 drivers/pci/intel-iommu.c |   24 +++++++++++-------------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9c39095..9e9ac5c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1794,7 +1794,7 @@ config PCI_MMCONFIG
 
 config DMAR
        bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-       depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
+       depends on PCI_MSI && ACPI && EXPERIMENTAL
        help
          DMA remapping (DMAR) devices support enables independent address
          translations for Direct Memory Access (DMA) from devices.
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f4b7c79..03bc0e5 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -687,15 +687,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 {
        int addr_width = agaw_to_width(domain->agaw);
+       int npages;
 
        start &= (((u64)1) << addr_width) - 1;
        end &= (((u64)1) << addr_width) - 1;
        /* in case it's partial page */
        start = PAGE_ALIGN(start);
        end &= PAGE_MASK;
+       npages = (end - start) / VTD_PAGE_SIZE;
 
        /* we don't need lock here, nobody else touches the iova range */
-       while (start < end) {
+       while (npages--) {
                dma_pte_clear_one(domain, start);
                start += VTD_PAGE_SIZE;
        }
@@ -2277,7 +2279,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 error:
        if (iova)
                __free_iova(&domain->iovad, iova);
-       printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
+       printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
                pci_name(pdev), size, (unsigned long long)paddr, dir);
        return 0;
 }
@@ -2373,7 +2375,7 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
        start_addr = iova->pfn_lo << PAGE_SHIFT;
        size = aligned_size((u64)dev_addr, size);
 
-       pr_debug("Device %s unmapping: %lx@%llx\n",
+       pr_debug("Device %s unmapping: %zx@%llx\n",
                pci_name(pdev), size, (unsigned long long)start_addr);
 
        /*  clear the whole page */
@@ -2431,8 +2433,6 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        free_pages((unsigned long)vaddr, order);
 }
 
-#define SG_ENT_VIRT_ADDRESS(sg)        (sg_virt((sg)))
-
 void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
                    int nelems, int dir)
 {
@@ -2442,7 +2442,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        unsigned long start_addr;
        struct iova *iova;
        size_t size = 0;
-       void *addr;
+       phys_addr_t addr;
        struct scatterlist *sg;
        struct intel_iommu *iommu;
 
@@ -2458,7 +2458,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        if (!iova)
                return;
        for_each_sg(sglist, sg, nelems, i) {
-               addr = SG_ENT_VIRT_ADDRESS(sg);
+               addr = page_to_phys(sg_page(sg)) + sg->offset;
                size += aligned_size((u64)addr, sg->length);
        }
 
@@ -2485,7 +2485,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
                sg->dma_length = sg->length;
        }
        return nelems;
@@ -2494,7 +2494,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
                 int dir)
 {
-       void *addr;
+       phys_addr_t addr;
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
@@ -2518,8 +2518,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
        iommu = domain_get_iommu(domain);
 
        for_each_sg(sglist, sg, nelems, i) {
-               addr = SG_ENT_VIRT_ADDRESS(sg);
-               addr = (void *)virt_to_phys(addr);
+               addr = page_to_phys(sg_page(sg)) + sg->offset;
                size += aligned_size((u64)addr, sg->length);
        }
 
@@ -2542,8 +2541,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
        start_addr = iova->pfn_lo << PAGE_SHIFT;
        offset = 0;
        for_each_sg(sglist, sg, nelems, i) {
-               addr = SG_ENT_VIRT_ADDRESS(sg);
-               addr = (void *)virt_to_phys(addr);
+               addr = page_to_phys(sg_page(sg)) + sg->offset;
                size = aligned_size((u64)addr, sg->length);
                ret = domain_page_mapping(domain, start_addr + offset,
                        ((u64)addr) & PAGE_MASK,
-- 
1.5.6.4

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to