On 03/06/2019 04:16, Lu Baolu wrote:
> IOMMU hardware always uses paging for DMA remapping. The
> minimum mapped window is a page size. Device drivers may
> map buffers that don't fill a whole IOMMU window, which
> allows the device to access possibly unrelated memory, and
> various malicious devices can exploit this to perform a
> DMA attack.
>
> This introduces the bounce buffer mechanism for DMA buffers
> which don't fill a minimal IOMMU page. It could be used by
> various vendor-specific IOMMU drivers as long as the DMA
> domain is managed by the generic IOMMU layer. The below
> APIs are added:
>
> * iommu_bounce_map(dev, addr, paddr, size, dir, attrs)
>   - Map a buffer starting at DMA address @addr in bounce page
>     manner. For buffer parts that don't cross a whole
>     minimal IOMMU page, the bounce page policy is applied.
>     A bounce page mapped by swiotlb will be used as the DMA
>     target in the IOMMU page table. Otherwise, the physical
>     address @paddr is mapped instead.
>
> * iommu_bounce_unmap(dev, addr, size, dir, attrs)
>   - Unmap the buffer mapped with iommu_bounce_map(). The bounce
>     page will be torn down after the bounced data is synced.
>
> * iommu_bounce_sync(dev, addr, size, dir, target)
>   - Sync the bounced data in case the bounce mapped buffer is
>     reused.
>
> The whole API is guarded by the kernel option IOMMU_BOUNCE_PAGE.
> This is useful for cases where bounce pages aren't needed, for
> example, embedded cases.
>
> Cc: Ashok Raj <[email protected]>
> Cc: Jacob Pan <[email protected]>
> Cc: Kevin Tian <[email protected]>
> Cc: Alan Cox <[email protected]>
> Cc: Mika Westerberg <[email protected]>
> Signed-off-by: Lu Baolu <[email protected]>
> ---
>  drivers/iommu/Kconfig |  14 +++++
>  drivers/iommu/iommu.c | 119 ++++++++++++++++++++++++++++++++++++++++++
>  include/linux/iommu.h |  35 +++++++++++++
>  3 files changed, 168 insertions(+)
>
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index 83664db5221d..d837ec3f359b 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -86,6 +86,20 @@ config IOMMU_DEFAULT_PASSTHROUGH
>
>  	  If unsure, say N here.
>
> +config IOMMU_BOUNCE_PAGE
> +	bool "Use bounce page for untrusted devices"
> +	depends on IOMMU_API
> +	select SWIOTLB
> +	help
> +	  IOMMU hardware always uses paging for DMA remapping. The minimum
> +	  mapped window is a page size. Device drivers may map buffers not
> +	  filling the whole IOMMU window. This allows a device to access
> +	  possibly unrelated memory, and a malicious device can exploit
> +	  this to perform a DMA attack. Select this to use a bounce page
> +	  for a buffer which doesn't fill a whole IOMMU page.
> +
> +	  If unsure, say N here.
> +
>  config OF_IOMMU
>  	def_bool y
>  	depends on OF && IOMMU_API
> diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
> index 2a906386bb8e..fa44f681a82b 100644
> --- a/drivers/iommu/iommu.c
> +++ b/drivers/iommu/iommu.c
> @@ -2246,3 +2246,122 @@ int iommu_sva_get_pasid(struct iommu_sva *handle)
>  	return ops->sva_get_pasid(handle);
>  }
>  EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
> +
> +#ifdef CONFIG_IOMMU_BOUNCE_PAGE
> +
> +/*
> + * Bounce buffer support for external devices:
> + *
> + * IOMMU hardware always uses paging for DMA remapping. The minimum mapped
> + * window is a page size. Device drivers may map buffers not filling the
> + * whole IOMMU window. This allows a device to access possibly unrelated
> + * memory, and a malicious device can exploit this to perform a DMA attack.
> + * Use bounce pages for buffers which don't fill whole IOMMU pages.
> + */
> +
> +static inline size_t
> +get_aligned_size(struct iommu_domain *domain, dma_addr_t addr, size_t size)
> +{
> +	unsigned long page_size = 1 << __ffs(domain->pgsize_bitmap);
> +	unsigned long offset = page_size - 1;
> +
> +	return ALIGN((addr & offset) + size, page_size);
> +}
> +
> +dma_addr_t iommu_bounce_map(struct device *dev, dma_addr_t iova,
> +			    phys_addr_t paddr, size_t size,
> +			    enum dma_data_direction dir,
> +			    unsigned long attrs)
> +{
> +	struct iommu_domain *domain;
> +	unsigned int min_pagesz;
> +	phys_addr_t tlb_addr;
> +	size_t aligned_size;
> +	int prot = 0;
> +	int ret;
> +
> +	domain = iommu_get_dma_domain(dev);
> +	if (!domain)
> +		return DMA_MAPPING_ERROR;
> +
> +	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
> +		prot |= IOMMU_READ;
> +	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
> +		prot |= IOMMU_WRITE;
> +
> +	aligned_size = get_aligned_size(domain, paddr, size);
> +	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
> +
> +	/*
> +	 * If both the physical buffer start address and size are
> +	 * page aligned, we don't need to use a bounce page.
> +	 */
> +	if (!IS_ALIGNED(paddr | size, min_pagesz)) {
> +		tlb_addr = swiotlb_tbl_map_single(dev,
> +				__phys_to_dma(dev, io_tlb_start),
> +				paddr, size, aligned_size, dir, attrs);
> +		if (tlb_addr == DMA_MAPPING_ERROR)
> +			return DMA_MAPPING_ERROR;
> +	} else {
> +		tlb_addr = paddr;
> +	}
> +
> +	ret = iommu_map(domain, iova, tlb_addr, aligned_size, prot);
> +	if (ret) {
> +		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
> +					 aligned_size, dir, attrs);

You would probably want to check whether @tlb_addr came from
swiotlb_tbl_map_single() here (is_swiotlb_buffer(), or reuse the
alignment predicate from above).
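Something along these lines is what I mean (a rough sketch only,
untested, using is_swiotlb_buffer() as the predicate):

	ret = iommu_map(domain, iova, tlb_addr, aligned_size, prot);
	if (ret) {
		/* Only undo the bounce if we created it above. */
		if (is_swiotlb_buffer(tlb_addr))
			swiotlb_tbl_unmap_single(dev, tlb_addr, size,
						 aligned_size, dir, attrs);
		return DMA_MAPPING_ERROR;
	}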
> +		return DMA_MAPPING_ERROR;
> +	}
> +
> +	return iova;
> +}
> +EXPORT_SYMBOL_GPL(iommu_bounce_map);
> +
> +static inline phys_addr_t
> +iova_to_tlb_addr(struct iommu_domain *domain, dma_addr_t addr)
> +{
> +	if (unlikely(!domain->ops || !domain->ops->iova_to_phys))
> +		return 0;
> +
> +	return domain->ops->iova_to_phys(domain, addr);
> +}
> +
> +void iommu_bounce_unmap(struct device *dev, dma_addr_t iova, size_t size,
> +			enum dma_data_direction dir, unsigned long attrs)
> +{
> +	struct iommu_domain *domain;
> +	phys_addr_t tlb_addr;
> +	size_t aligned_size;
> +
> +	domain = iommu_get_dma_domain(dev);
> +	if (WARN_ON(!domain))
> +		return;
> +
> +	aligned_size = get_aligned_size(domain, iova, size);
> +	tlb_addr = iova_to_tlb_addr(domain, iova);
> +	if (WARN_ON(!tlb_addr))
> +		return;
> +
> +	iommu_unmap(domain, iova, aligned_size);
> +	if (is_swiotlb_buffer(tlb_addr))
Is there any chance that @tlb_addr is a swiotlb buffer but is owned by
an API user? I mean something like
iommu_bounce_map(swiotlb_tbl_map_single()). Then, to retain the
ownership semantics, we shouldn't unmap it here. Maybe check for that
and fail iommu_bounce_map() to be sure?
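Something like this near the top of iommu_bounce_map() is what I have
in mind (a sketch only, assuming is_swiotlb_buffer() is a good enough
test for "already bounced by the caller"):

	/*
	 * Refuse buffers that already live in the swiotlb, so that
	 * iommu_bounce_unmap() can't free a bounce slot it doesn't own.
	 */
	if (is_swiotlb_buffer(paddr))
		return DMA_MAPPING_ERROR;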
> +		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
> +					 aligned_size, dir, attrs);
> +}
> +EXPORT_SYMBOL_GPL(iommu_bounce_unmap);
> +
> +void iommu_bounce_sync(struct device *dev, dma_addr_t addr, size_t size,
> +		       enum dma_data_direction dir, enum dma_sync_target target)
> +{
> +	struct iommu_domain *domain;
> +	phys_addr_t tlb_addr;
> +
> +	domain = iommu_get_dma_domain(dev);
> +	if (WARN_ON(!domain))
> +		return;
> +
> +	tlb_addr = iova_to_tlb_addr(domain, addr);
> +	if (is_swiotlb_buffer(tlb_addr))
> +		swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
> +}
> +EXPORT_SYMBOL_GPL(iommu_bounce_sync);
> +#endif /* CONFIG_IOMMU_BOUNCE_PAGE */
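For reference, here is roughly how I read the intended call sequence
from a vendor driver's map/unmap path (a sketch only; the @iova
allocation is elided and none of this is from the patch itself). As I
understand it, iommu_bounce_unmap() already syncs on teardown, so the
explicit sync is only needed while the mapping stays live:

	dma_addr_t dma_addr;

	dma_addr = iommu_bounce_map(dev, iova, page_to_phys(page) + offset,
				    size, dir, attrs);
	if (dma_addr == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* ... device performs DMA ... */

	/* Copy bounced data back before the CPU reads the buffer again. */
	iommu_bounce_sync(dev, dma_addr, size, dir, SYNC_FOR_CPU);

	iommu_bounce_unmap(dev, dma_addr, size, dir, attrs);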
> diff --git a/include/linux/iommu.h b/include/linux/iommu.h
> index 91af22a344e2..814c0da64692 100644
> --- a/include/linux/iommu.h
> +++ b/include/linux/iommu.h
> @@ -25,6 +25,8 @@
>  #include <linux/errno.h>
>  #include <linux/err.h>
>  #include <linux/of.h>
> +#include <linux/swiotlb.h>
> +#include <linux/dma-direct.h>
>
>  #define IOMMU_READ	(1 << 0)
>  #define IOMMU_WRITE	(1 << 1)
> @@ -499,6 +501,39 @@ int iommu_sva_set_ops(struct iommu_sva *handle,
> 		      const struct iommu_sva_ops *ops);
>  int iommu_sva_get_pasid(struct iommu_sva *handle);
>
> +#ifdef CONFIG_IOMMU_BOUNCE_PAGE
> +dma_addr_t iommu_bounce_map(struct device *dev, dma_addr_t iova,
> +			    phys_addr_t paddr, size_t size,
> +			    enum dma_data_direction dir,
> +			    unsigned long attrs);
> +void iommu_bounce_unmap(struct device *dev, dma_addr_t iova, size_t size,
> +			enum dma_data_direction dir, unsigned long attrs);
> +void iommu_bounce_sync(struct device *dev, dma_addr_t addr, size_t size,
> +		       enum dma_data_direction dir,
> +		       enum dma_sync_target target);
> +#else
> +static inline
> +dma_addr_t iommu_bounce_map(struct device *dev, dma_addr_t iova,
> +			    phys_addr_t paddr, size_t size,
> +			    enum dma_data_direction dir,
> +			    unsigned long attrs)
> +{
> +	return DMA_MAPPING_ERROR;
> +}
> +
> +static inline
> +void iommu_bounce_unmap(struct device *dev, dma_addr_t iova, size_t size,
> +			enum dma_data_direction dir, unsigned long attrs)
> +{
> +}
> +
> +static inline
> +void iommu_bounce_sync(struct device *dev, dma_addr_t addr, size_t size,
> +		       enum dma_data_direction dir, enum dma_sync_target target)
> +{
> +}
> +#endif /* CONFIG_IOMMU_BOUNCE_PAGE */
> +
>  #else /* CONFIG_IOMMU_API */
>
>  struct iommu_ops {};
>
--
Yours sincerely,
Pavel Begunkov