Re: [PATCH v5 3/7] iommu: Allow the dma-iommu api to use bounce buffers

2020-11-23 Thread Lu Baolu

Hi Will,

On 2020/11/23 19:47, Will Deacon wrote:

> On Mon, Nov 23, 2020 at 07:40:57PM +0800, Lu Baolu wrote:
> > On 2020/11/23 18:08, Christoph Hellwig wrote:
> > > > +	/*
> > > > +	 * If both the physical buffer start address and size are
> > > > +	 * page aligned, we don't need to use a bounce page.
> > > > +	 */
> > > > +	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
> > > > +	    iova_offset(iovad, phys | org_size)) {
> > > > +		aligned_size = iova_align(iovad, org_size);
> > > > +		phys = swiotlb_tbl_map_single(dev,
> > > > +				phys_to_dma(dev, io_tlb_start),
> > > > +				phys, org_size, aligned_size, dir, attrs);
> > >
> > > swiotlb_tbl_map_single takes one less argument in 5.10-rc now.
> >
> > Yes. But Will's iommu/next branch is based on 5.10-rc3. I synced with
> > him, and we agreed to keep it at 5.10-rc3 and resolve this conflict
> > when merging it.
>
> That's right, although I failed to appreciate that the conflict was due
> to a change in function prototype rather than just a context collision.
> So I've updated the vt-d branch to contain the stuff from Konrad:
>
> https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/log/?h=for-next/iommu/vt-d
>
> Sorry for messing you around!

It's okay. I will re-base the patch series later.

Best regards,
baolu


Re: [PATCH v5 3/7] iommu: Allow the dma-iommu api to use bounce buffers

2020-11-23 Thread Will Deacon
On Mon, Nov 23, 2020 at 07:40:57PM +0800, Lu Baolu wrote:
> On 2020/11/23 18:08, Christoph Hellwig wrote:
> > > +	/*
> > > +	 * If both the physical buffer start address and size are
> > > +	 * page aligned, we don't need to use a bounce page.
> > > +	 */
> > > +	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
> > > +	    iova_offset(iovad, phys | org_size)) {
> > > +		aligned_size = iova_align(iovad, org_size);
> > > +		phys = swiotlb_tbl_map_single(dev,
> > > +				phys_to_dma(dev, io_tlb_start),
> > > +				phys, org_size, aligned_size, dir, attrs);
> >
> > swiotlb_tbl_map_single takes one less argument in 5.10-rc now.
>
> Yes. But Will's iommu/next branch is based on 5.10-rc3. I synced with
> him, and we agreed to keep it at 5.10-rc3 and resolve this conflict
> when merging it.

That's right, although I failed to appreciate that the conflict was due
to a change in function prototype rather than just a context collision.
So I've updated the vt-d branch to contain the stuff from Konrad:

https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/log/?h=for-next/iommu/vt-d

Sorry for messing you around!

Will


Re: [PATCH v5 3/7] iommu: Allow the dma-iommu api to use bounce buffers

2020-11-23 Thread Lu Baolu

Hi Christoph,

On 2020/11/23 18:08, Christoph Hellwig wrote:
> > +	/*
> > +	 * If both the physical buffer start address and size are
> > +	 * page aligned, we don't need to use a bounce page.
> > +	 */
> > +	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
> > +	    iova_offset(iovad, phys | org_size)) {
> > +		aligned_size = iova_align(iovad, org_size);
> > +		phys = swiotlb_tbl_map_single(dev,
> > +				phys_to_dma(dev, io_tlb_start),
> > +				phys, org_size, aligned_size, dir, attrs);
>
> swiotlb_tbl_map_single takes one less argument in 5.10-rc now.

Yes. But Will's iommu/next branch is based on 5.10-rc3. I synced with
him, and we agreed to keep it at 5.10-rc3 and resolve this conflict
when merging it.

Best regards,
baolu


Re: [PATCH v5 3/7] iommu: Allow the dma-iommu api to use bounce buffers

2020-11-23 Thread Christoph Hellwig
> +	/*
> +	 * If both the physical buffer start address and size are
> +	 * page aligned, we don't need to use a bounce page.
> +	 */
> +	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
> +	    iova_offset(iovad, phys | org_size)) {
> +		aligned_size = iova_align(iovad, org_size);
> +		phys = swiotlb_tbl_map_single(dev,
> +				phys_to_dma(dev, io_tlb_start),
> +				phys, org_size, aligned_size, dir, attrs);

swiotlb_tbl_map_single takes one less argument in 5.10-rc now.
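
For reference, the 5.10-rc swiotlb API dropped the leading tbl_dma_addr
argument, so the call quoted above would become something like the
following sketch (assuming the surrounding code stays otherwise
unchanged):

	/* Sketch only: 5.10-rc swiotlb_tbl_map_single(), tbl_dma_addr dropped. */
	phys = swiotlb_tbl_map_single(dev, phys, org_size, aligned_size,
				      dir, attrs);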


[PATCH v5 3/7] iommu: Allow the dma-iommu api to use bounce buffers

2020-11-20 Thread Lu Baolu
From: Tom Murphy 

Allow the dma-iommu api to use bounce buffers for untrusted devices.
This is a copy of the intel bounce buffer code.

Signed-off-by: Tom Murphy 
Co-developed-by: Lu Baolu 
Signed-off-by: Lu Baolu 
Tested-by: Logan Gunthorpe 
---
 drivers/iommu/dma-iommu.c | 163 +++---
 1 file changed, 150 insertions(+), 13 deletions(-)
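
The heart of the patch below is the single alignment test
iova_offset(iovad, phys | org_size): OR-ing the buffer's start address
with its size means one non-zero check catches either value being
unaligned. A minimal user-space sketch of the idea (the 4 KiB granule
and these helper definitions are illustrative stand-ins, not the
kernel's IOVA API):

#include <stdio.h>

#define GRANULE 4096UL	/* assumed IOVA granule, for illustration only */

/* bits of addr below the granule boundary */
static unsigned long iova_offset(unsigned long addr)
{
	return addr & (GRANULE - 1);
}

/* round size up to the next granule multiple */
static unsigned long iova_align(unsigned long size)
{
	return (size + GRANULE - 1) & ~(GRANULE - 1);
}

int main(void)
{
	unsigned long phys = 0x1000200;	/* start is not granule aligned */
	unsigned long size = 0x200;	/* size is not granule aligned */

	/* if either operand has low-order bits set, so does the OR */
	if (iova_offset(phys | size))
		printf("bounce: use a %lu-byte aligned swiotlb slot\n",
		       iova_align(size));
	else
		printf("map in place\n");
	return 0;
}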

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index de521e22bafb..10dafbc3d9e0 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,9 +20,11 @@
 #include <linux/irq.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
+#include <linux/swiotlb.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
+#include <linux/dma-direct.h>
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -499,6 +501,31 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
 }
 
+static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	phys_addr_t phys;
+
+	phys = iommu_iova_to_phys(domain, dma_addr);
+	if (WARN_ON(!phys))
+		return;
+
+	__iommu_dma_unmap(dev, dma_addr, size);
+
+	if (unlikely(is_swiotlb_buffer(phys)))
+		swiotlb_tbl_unmap_single(dev, phys, size,
+				iova_align(iovad, size), dir, attrs);
+}
+
+static bool dev_is_untrusted(struct device *dev)
+{
+	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+}
+
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		size_t size, int prot, u64 dma_mask)
 {
@@ -524,6 +551,55 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	return iova + iova_off;
 }
 
+static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+		size_t org_size, dma_addr_t dma_mask, bool coherent,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	int prot = dma_info_to_prot(dir, coherent, attrs);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t aligned_size = org_size;
+	void *padding_start;
+	size_t padding_size;
+	dma_addr_t iova;
+
+	/*
+	 * If both the physical buffer start address and size are
+	 * page aligned, we don't need to use a bounce page.
+	 */
+	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+	    iova_offset(iovad, phys | org_size)) {
+		aligned_size = iova_align(iovad, org_size);
+		phys = swiotlb_tbl_map_single(dev,
+				phys_to_dma(dev, io_tlb_start),
+				phys, org_size, aligned_size, dir, attrs);
+
+		if (phys == DMA_MAPPING_ERROR)
+			return DMA_MAPPING_ERROR;
+
+		/* Cleanup the padding area. */
+		padding_start = phys_to_virt(phys);
+		padding_size = aligned_size;
+
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		    (dir == DMA_TO_DEVICE ||
+		     dir == DMA_BIDIRECTIONAL)) {
+			padding_start += org_size;
+			padding_size -= org_size;
+		}
+
+		memset(padding_start, 0, padding_size);
+	}
+
+	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
+		swiotlb_tbl_unmap_single(dev, phys, org_size,
+				aligned_size, dir, attrs);
+
+	return iova;
+}
+
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
 	while (count--)
@@ -697,11 +773,15 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_cpu(phys, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
+	if (is_swiotlb_buffer(phys))
+		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -709,11 +789,15 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
phys =