The current iommu_dma_get_sgtable code does not properly handle memory
from the page allocator that hasn't been remapped, which can happen in
the rare case of allocations for a coherent device that aren't allowed
to block.

Fix this by replacing iommu_dma_get_sgtable with a slightly tweaked copy
of dma_common_get_sgtable that adds special handling for the remapped
array of pages allocated by __iommu_dma_alloc.

Signed-off-by: Christoph Hellwig <h...@lst.de>
---
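
Note below the cut line, so it stays out of the commit message: a minimal
sketch of the classification the rewritten helper performs, with comments
giving my reading of the three allocation paths.  The identifiers match
the diff below; the comments are annotation, not code from the patch.

	if (is_vmalloc_addr(cpu_addr)) {
		if (!(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
			/*
			 * Discontiguous pages remapped by
			 * __iommu_dma_alloc: build the scatterlist from
			 * the vm_struct's area->pages array.
			 */
			return iommu_dma_get_sgtable_remap(sgt, cpu_addr,
					size);
		/* contiguous but still remapped: one struct page suffices */
		page = vmalloc_to_page(cpu_addr);
	} else {
		/*
		 * Never remapped, straight from the page allocator: the
		 * non-blocking coherent case this patch fixes.
		 */
		page = virt_to_page(cpu_addr);
	}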
 drivers/iommu/dma-iommu.c | 49 +++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 25 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 26f479d49103..8f3dc6ab3da1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -620,6 +620,18 @@ static int iommu_dma_mmap_remap(void *cpu_addr, size_t size,
        return ret;
 }
 
+static int iommu_dma_get_sgtable_remap(struct sg_table *sgt, void *cpu_addr,
+               size_t size)
+{
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       struct vm_struct *area = find_vm_area(cpu_addr);
+
+       if (WARN_ON(!area || !area->pages))
+               return -ENXIO;
+       return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
+                       GFP_KERNEL);
+}
+
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
@@ -1080,37 +1092,24 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
 
-static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
-               size_t size)
-{
-       int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-
-       if (!ret)
-               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-       return ret;
-}
-
 static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
 {
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct vm_struct *area = find_vm_area(cpu_addr);
-
-       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-               /*
-                * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-                * hence in the vmalloc space.
-                */
-               struct page *page = vmalloc_to_page(cpu_addr);
-               return __iommu_dma_get_sgtable_page(sgt, page, size);
-       }
+       struct page *page;
+       int ret;
 
-       if (WARN_ON(!area || !area->pages))
-               return -ENXIO;
+       if (is_vmalloc_addr(cpu_addr)) {
+               if (!(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+                       return iommu_dma_get_sgtable_remap(sgt, cpu_addr, size);
+               page = vmalloc_to_page(cpu_addr);
+       } else
+               page = virt_to_page(cpu_addr);
 
-       return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
-                                        GFP_KERNEL);
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (!ret)
+               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return ret;
 }
 
 static const struct dma_map_ops iommu_dma_ops = {
-- 
2.20.1