+ help
+ IOMMU hardware always uses paging for DMA remapping. The minimum
+ mapped window is a page size. Device drivers may map buffers that
+ do not fill a whole IOMMU window. This allows the device to access
+ possibly unrelated memory, and a malicious device can exploit this
+ to perform a DMA attack. Select this to use a bounce page for any
+ buffer that doesn't fill a whole IOMMU page.
+
+ If unsure, say N here.
+
config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2a906386bb8e..fa44f681a82b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2246,3 +2246,122 @@ int iommu_sva_get_pasid(struct iommu_sva *handle)
return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+
+#ifdef CONFIG_IOMMU_BOUNCE_PAGE
+
+/*
+ * Bounce buffer support for external devices:
+ *
+ * IOMMU hardware always uses paging for DMA remapping. The minimum mapped
+ * window is a page size. Device drivers may map buffers that do not fill
+ * a whole IOMMU window. This allows the device to access possibly
+ * unrelated memory, and a malicious device can exploit this to perform a
+ * DMA attack. Use bounce pages for buffers that don't fill whole IOMMU
+ * pages.
+ */
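+
+/*
+ * Expected usage (a sketch implied by the APIs below, not mandated by
+ * this patch): a dma_map_ops backend calls iommu_bounce_map() from its
+ * map callbacks, iommu_bounce_sync() from its dma_sync_*() callbacks,
+ * and iommu_bounce_unmap() from its unmap callbacks.
+ */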
+
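+/*
+ * Round the buffer window [addr, addr + size) up to the IOMMU's minimum
+ * page granularity. For example, with 4KiB pages, addr 0x1004 and size
+ * 0x2000 give ALIGN(0x004 + 0x2000, 0x1000) = 0x3000, i.e. three pages.
+ */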
+static inline size_t
+get_aligned_size(struct iommu_domain *domain, dma_addr_t addr, size_t size)
+{
+ unsigned long page_size = 1 << __ffs(domain->pgsize_bitmap);
+ unsigned long offset = page_size - 1;
+
+ return ALIGN((addr & offset) + size, page_size);
+}
+
+dma_addr_t iommu_bounce_map(struct device *dev, dma_addr_t iova,
+ phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iommu_domain *domain;
+ unsigned int min_pagesz;
+ phys_addr_t tlb_addr;
+ size_t aligned_size;
+ int prot = 0;
+ int ret;
+
+ domain = iommu_get_dma_domain(dev);
+ if (!domain)
+ return DMA_MAPPING_ERROR;
+
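+ /* Translate the DMA direction into IOMMU page-table permissions. */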
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ prot |= IOMMU_READ;
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ prot |= IOMMU_WRITE;
+
+ aligned_size = get_aligned_size(domain, paddr, size);
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
+
+ /*
+ * If both the physical buffer start address and size are
+ * page aligned, we don't need to use a bounce page.
+ */
+ if (!IS_ALIGNED(paddr | size, min_pagesz)) {
+ tlb_addr = swiotlb_tbl_map_single(dev,
+ __phys_to_dma(dev, io_tlb_start),
+ paddr, size, aligned_size, dir, attrs);
+ if (tlb_addr == DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;
+ } else {
+ tlb_addr = paddr;
+ }
+
+ ret = iommu_map(domain, iova, tlb_addr, aligned_size, prot);
+ if (ret) {
+ /* Only release the swiotlb slot if one was actually used. */
+ if (is_swiotlb_buffer(tlb_addr))
+ swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+ aligned_size, dir, attrs);
+ return DMA_MAPPING_ERROR;
+ }
+
+ return iova;
+}
+EXPORT_SYMBOL_GPL(iommu_bounce_map);
+
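+/*
+ * Walk the domain's page tables to find the physical address currently
+ * mapped at @addr. For a bounced buffer this is the swiotlb slot rather
+ * than the original page. Returns 0 if the domain cannot translate.
+ */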
+static inline phys_addr_t
+iova_to_tlb_addr(struct iommu_domain *domain, dma_addr_t addr)
+{
+ if (unlikely(!domain->ops || !domain->ops->iova_to_phys))
+ return 0;
+
+ return domain->ops->iova_to_phys(domain, addr);
+}
+
+void iommu_bounce_unmap(struct device *dev, dma_addr_t iova, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain;
+ phys_addr_t tlb_addr;
+ size_t aligned_size;
+
+ domain = iommu_get_dma_domain(dev);
+ if (WARN_ON(!domain))
+ return;
+
+ aligned_size = get_aligned_size(domain, iova, size);
+ tlb_addr = iova_to_tlb_addr(domain, iova);
+ if (WARN_ON(!tlb_addr))
+ return;
+
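+ /*
+ * Tear down the IOMMU mapping first so the device loses access, then
+ * release the bounce slot; swiotlb_tbl_unmap_single() copies the data
+ * back to the original buffer for DMA_FROM_DEVICE mappings.
+ */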
+ iommu_unmap(domain, iova, aligned_size);
+ if (is_swiotlb_buffer(tlb_addr))
+ swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+ aligned_size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(iommu_bounce_unmap);
+
+void iommu_bounce_sync(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, enum dma_sync_target target)
+{
+ struct iommu_domain *domain;
+ phys_addr_t tlb_addr;
+
+ domain = iommu_get_dma_domain(dev);
+ if (WARN_ON(!domain))
+ return;
+
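+ /*
+ * Only bounced buffers are backed by a swiotlb slot; for mappings
+ * that were not bounced, is_swiotlb_buffer() fails and the sync is
+ * a no-op.
+ */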
+ tlb_addr = iova_to_tlb_addr(domain, addr);
+ if (is_swiotlb_buffer(tlb_addr))
+ swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
+}
+EXPORT_SYMBOL_GPL(iommu_bounce_sync);
+#endif /* CONFIG_IOMMU_BOUNCE_PAGE */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 91af22a344e2..814c0da64692 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -25,6 +25,8 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/swiotlb.h>
+#include <linux/dma-direct.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -499,6 +501,39 @@ int iommu_sva_set_ops(struct iommu_sva *handle,
const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);
+#ifdef CONFIG_IOMMU_BOUNCE_PAGE
+dma_addr_t iommu_bounce_map(struct device *dev, dma_addr_t iova,
+ phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs);
+void iommu_bounce_unmap(struct device *dev, dma_addr_t iova, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_bounce_sync(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir,
+ enum dma_sync_target target);
+#else
+static inline
+dma_addr_t iommu_bounce_map(struct device *dev, dma_addr_t iova,
+ phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+
+static inline
+void iommu_bounce_unmap(struct device *dev, dma_addr_t iova, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+
+static inline
+void iommu_bounce_sync(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, enum dma_sync_target target)
+{
+}
+#endif /* CONFIG_IOMMU_BOUNCE_PAGE */
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
--
2.17.1