This patch adds a new "flush" callback to iommu_ops, which is expected
to perform whatever flushes are needed within the given IOMMU domain so
that any changes to mappings of the area [iova; iova + size) become
visible to IOMMU clients.

The purpose is to let IOMMU drivers skip page-by-page flushes and
replace them with a single flush of the full address range on hardware
that supports it.
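
For illustration only, a minimal sketch of how a driver could wire up
the new callback is shown below; the my_iommu_* names and the
range-flush primitive are hypothetical, and a real driver would flush
whatever TLB or page-table caches its hardware actually has:

  #include <linux/iommu.h>

  /* Driver-private domain, assumed to embed the generic iommu_domain. */
  struct my_iommu_domain {
          struct iommu_domain domain;
          /* ... hardware state, e.g. MMIO registers ... */
  };

  static void my_iommu_flush(struct iommu_domain *domain, unsigned long iova,
                             size_t size)
  {
          struct my_iommu_domain *priv =
                  container_of(domain, struct my_iommu_domain, domain);

          /*
           * Issue a single flush covering [iova; iova + size) instead of
           * one flush per mapped or unmapped page.
           */
          my_iommu_hw_flush_range(priv, iova, size);      /* hypothetical */
  }

  static const struct iommu_ops my_iommu_ops = {
          /* .map, .unmap, .map_sg, ... as before */
          .flush  = my_iommu_flush,
  };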

Signed-off-by: Tomasz Figa <tf...@chromium.org>
---
 drivers/iommu/iommu.c | 33 ++++++++++++++++++++++++++++++---
 include/linux/iommu.h |  2 ++
 2 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 049df49..d6bb8fe 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1286,8 +1286,15 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
        return pgsize;
 }
 
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t paddr, size_t size, int prot)
+static void iommu_flush(struct iommu_domain *domain, unsigned long iova,
+                       size_t size)
+{
+       if (domain->ops->flush)
+               domain->ops->flush(domain, iova, size);
+}
+
+static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+                      phys_addr_t paddr, size_t size, int prot)
 {
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
@@ -1340,6 +1347,20 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 
        return ret;
 }
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+             phys_addr_t paddr, size_t size, int prot)
+{
+       int ret;
+
+       ret = __iommu_map(domain, iova, paddr, size, prot);
+       if (ret)
+               return ret;
+
+       iommu_flush(domain, iova, size);
+
+       return 0;
+}
 EXPORT_SYMBOL_GPL(iommu_map);
 
 size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
@@ -1389,6 +1410,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
                unmapped += unmapped_page;
        }
 
+       iommu_flush(domain, orig_iova, unmapped);
        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
 }
@@ -1419,19 +1441,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                if (!IS_ALIGNED(s->offset, min_pagesz))
                        goto out_err;
 
-               ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
+               ret = __iommu_map(domain, iova + mapped, phys, s->length, prot);
                if (ret)
                        goto out_err;
 
                mapped += s->length;
        }
 
+       iommu_flush(domain, iova, mapped);
+
        return mapped;
 
 out_err:
        /* undo mappings already done */
        iommu_unmap(domain, iova, mapped);
 
+       /* flush in case part of our mapping already got cached */
+       iommu_flush(domain, iova, mapped);
+
        return 0;
 
 }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f9c1b6d..18e59a9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -164,6 +164,8 @@ struct iommu_ops {
                     size_t size);
        size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
                         struct scatterlist *sg, unsigned int nents, int prot);
+       void (*flush)(struct iommu_domain *domain, unsigned long iova,
+                     size_t size);
        phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
        int (*add_device)(struct device *dev);
        void (*remove_device)(struct device *dev);
-- 
2.6.0.rc2.230.g3dd15c0
