Use dev->coherent_dma_mask when allocating in the dma-iommu ops API.
Pass the mask to use down into __iommu_dma_map() so that coherent
allocations are bounded by dev->coherent_dma_mask while streaming
mappings keep using dma_get_mask(dev).

Signed-off-by: Tom Murphy <murph...@tcd.ie>
---
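Note for reviewers (not part of the patch): the mask selection this
change introduces boils down to the sketch below. iommu_dma_mask_for()
is only an illustrative name, not a helper added here; the point is
that streaming mappings keep using the streaming mask while coherent
allocations are bounded by dev->coherent_dma_mask.

	#include <linux/dma-mapping.h>	/* dma_get_mask(), struct device masks */

	/* Illustrative only: pick the mask that bounds the IOVA allocation. */
	static inline u64 iommu_dma_mask_for(struct device *dev, bool coherent)
	{
		/* dma_alloc_attrs()-style buffers must honour the coherent mask */
		return coherent ? dev->coherent_dma_mask : dma_get_mask(dev);
	}
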
 drivers/iommu/dma-iommu.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f303bbe20e51..082fb789e3cf 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -471,7 +471,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-               size_t size, int prot)
+               size_t size, int prot, dma_addr_t dma_mask)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -486,7 +486,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size = iova_align(&cookie->iovad, size + iova_off);
        }
 
-       iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+       iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
        if (!iova)
                return DMA_MAPPING_ERROR;
 
@@ -737,7 +737,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dma_handle;
 
-       dma_handle = __iommu_dma_map(dev, phys, size, prot);
+       dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dma_handle != DMA_MAPPING_ERROR)
                arch_sync_dma_for_device(dev, phys, size, dir);
@@ -940,7 +940,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        return __iommu_dma_map(dev, phys, size,
-                       dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+                       dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+                       dma_get_mask(dev));
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1049,7 +1050,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
        if (!cpu_addr)
                return NULL;
 
-       *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+       *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
+                       dev->coherent_dma_mask);
        if (*handle == DMA_MAPPING_ERROR) {
                __iommu_dma_free(dev, size, cpu_addr);
                return NULL;
@@ -1178,7 +1180,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
        if (!msi_page)
                return NULL;
 
-       iova = __iommu_dma_map(dev, msi_addr, size, prot);
+       iova = __iommu_dma_map(dev, msi_addr, size, prot, dma_get_mask(dev));
        if (iova == DMA_MAPPING_ERROR)
                goto out_free_page;
 
-- 
2.20.1
