From: Robin Murphy <robin.mur...@arm.com>

Since we duplicate the find_vm_area() logic a few times in places where
we only care about the pages, factor out a helper to abstract it.

Signed-off-by: Robin Murphy <robin.mur...@arm.com>
[hch: don't warn when not finding a region, as we'll rely on that later]
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 drivers/iommu/dma-iommu.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4596e4860da8..e870ea59a34d 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -554,6 +554,15 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
        return pages;
 }
 
+static struct page **__iommu_dma_get_pages(void *cpu_addr)
+{
+       struct vm_struct *area = find_vm_area(cpu_addr);
+
+       if (!area || !area->pages)
+               return NULL;
+       return area->pages;
+}
+
 /**
  * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
  * @dev: Device which owns this buffer
@@ -1042,11 +1051,11 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)){
-               struct vm_struct *area = find_vm_area(cpu_addr);
+               struct page **pages = __iommu_dma_get_pages(cpu_addr);
 
-               if (WARN_ON(!area || !area->pages))
+               if (!pages)
                        return;
-               __iommu_dma_free(dev, area->pages, iosize, &handle);
+               __iommu_dma_free(dev, pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                __iommu_dma_unmap(dev, handle, iosize);
@@ -1078,7 +1087,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
-       struct vm_struct *area;
+       struct page **pages;
        int ret;
 
        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
@@ -1103,11 +1112,10 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                return __iommu_dma_mmap_pfn(vma, pfn, size);
        }
 
-       area = find_vm_area(cpu_addr);
-       if (WARN_ON(!area || !area->pages))
+       pages = __iommu_dma_get_pages(cpu_addr);
+       if (!pages)
                return -ENXIO;
-
-       return __iommu_dma_mmap(area->pages, size, vma);
+       return __iommu_dma_mmap(pages, size, vma);
 }
 
 static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
@@ -1125,7 +1133,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct vm_struct *area = find_vm_area(cpu_addr);
+       struct page **pages;
 
        if (!is_vmalloc_addr(cpu_addr)) {
                struct page *page = virt_to_page(cpu_addr);
@@ -1141,10 +1149,10 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                return __iommu_dma_get_sgtable_page(sgt, page, size);
        }
 
-       if (WARN_ON(!area || !area->pages))
+       pages = __iommu_dma_get_pages(cpu_addr);
+       if (!pages)
                return -ENXIO;
-
-       return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
+       return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
                                         GFP_KERNEL);
 }
 
-- 
2.20.1
