Re: [PATCH v1 5/8] dma: Use size for swiotlb boundary checks

2021-06-03 Thread Robin Murphy

On 2021-06-03 01:41, Andi Kleen wrote:

swiotlb currently only uses the start address of a DMA to check if something
is in the swiotlb or not. But with virtio and untrusted hosts the host
could give some DMA mapping that crosses the swiotlb boundaries,
potentially leaking or corrupting data. Add size checks to all the swiotlb
checks and reject any DMAs that cross the swiotlb buffer boundaries.

Signed-off-by: Andi Kleen 
---
  drivers/iommu/dma-iommu.c   | 13 ++---
  drivers/xen/swiotlb-xen.c   | 11 ++-
  include/linux/dma-mapping.h |  4 ++--
  include/linux/swiotlb.h |  8 +---
  kernel/dma/direct.c |  8 
  kernel/dma/direct.h |  8 
  kernel/dma/mapping.c|  4 ++--
  net/xdp/xsk_buff_pool.c |  2 +-
  8 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7bcdd1205535..7ef13198721b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -504,7 +504,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
  
  	__iommu_dma_unmap(dev, dma_addr, size);


If you can't trust size below then you've already corrupted the IOMMU 
pagetables here :/


Robin.


-   if (unlikely(is_swiotlb_buffer(phys)))
+   if (unlikely(is_swiotlb_buffer(phys, size)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
  }
  
@@ -575,7 +575,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,

}
  
  	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);

-   if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+   if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys, org_size))
swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
  }
@@ -781,7 +781,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);
  
-	if (is_swiotlb_buffer(phys))

+   if (is_swiotlb_buffer(phys, size))
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
  }
  
@@ -794,7 +794,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,

return;
  
  	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);

-   if (is_swiotlb_buffer(phys))
+   if (is_swiotlb_buffer(phys, size))
swiotlb_sync_single_for_device(dev, phys, size, dir);
  
  	if (!dev_is_dma_coherent(dev))

@@ -815,7 +815,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
  
-		if (is_swiotlb_buffer(sg_phys(sg)))

+   if (is_swiotlb_buffer(sg_phys(sg), sg->length))
swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
sg->length, dir);
}
@@ -832,10 +832,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
  
  	for_each_sg(sgl, sg, nelems, i) {

-   if (is_swiotlb_buffer(sg_phys(sg)))
+   if (is_swiotlb_buffer(sg_phys(sg), sg->length))
swiotlb_sync_single_for_device(dev, sg_phys(sg),
   sg->length, dir);
-
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 24d11861ac7d..333846af8d35 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -89,7 +89,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
return 0;
  }
  
-static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)

+static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr,
+size_t size)
  {
unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
unsigned long xen_pfn = bfn_to_local_pfn(bfn);
@@ -100,7 +101,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 * in our domain. Therefore _only_ check address within our domain.
 */
if (pfn_valid(PFN_DOWN(paddr)))
-   return is_swiotlb_buffer(paddr);
+   return is_swiotlb_buffer(paddr, size);
return 0;
  }
  
@@ -431,7 +432,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,

}
  
  	/* NOTE: We use dev_addr here, not paddr! */

-   if (is_xen_swiotlb_buffer(hwdev, dev_addr))
+   if (is_xen_swiotlb_buffer(hwdev, dev_addr, size))
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
  }
  
@@ -448,7 +449,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,

  

Re: [PATCH v1 5/8] dma: Use size for swiotlb boundary checks

2021-06-02 Thread Andi Kleen



On 6/2/2021 6:48 PM, Konrad Rzeszutek Wilk wrote:

On Wed, Jun 02, 2021 at 05:41:30PM -0700, Andi Kleen wrote:

swiotlb currently only uses the start address of a DMA to check if something
is in the swiotlb or not. But with virtio and untrusted hosts the host
could give some DMA mapping that crosses the swiotlb boundaries,
potentially leaking or corrupting data. Add size checks to all the swiotlb
checks and reject any DMAs that cross the swiotlb buffer boundaries.

I seem to be only CC-ed on this and #7, so please bear with me.
You weren't cc'ed originally so if you get partial emails it must be 
through some list.


But could you explain to me why please:

commit daf9514fd5eb098d7d6f3a1247cb8cc48fc94155 (swiotlb/stable/for-linus-5.12)
Author: Martin Radev 
Date:   Tue Jan 12 16:07:29 2021 +0100

 swiotlb: Validate bounce size in the sync/unmap path

does not solve the problem as well?


Thanks. I missed that patch, race condition.

One major difference of my patch is that it supports an error return, 
which allows virtio to error out. This is important in virtio because 
otherwise you'll end up with uninitialized memory on the target without 
any indication. This uninitialized memory could be an potential attack 
vector on the guest memory, e.g. if the attacker finds some way to echo 
it out again.


But the error return could be added to your infrastructure too, which 
would make this patch much shorter. I'll take a look at that.


-Andi





___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v1 5/8] dma: Use size for swiotlb boundary checks

2021-06-02 Thread Andi Kleen
swiotlb currently only uses the start address of a DMA to check if something
is in the swiotlb or not. But with virtio and untrusted hosts the host
could give some DMA mapping that crosses the swiotlb boundaries,
potentially leaking or corrupting data. Add size checks to all the swiotlb
checks and reject any DMAs that cross the swiotlb buffer boundaries.

Signed-off-by: Andi Kleen 
---
 drivers/iommu/dma-iommu.c   | 13 ++---
 drivers/xen/swiotlb-xen.c   | 11 ++-
 include/linux/dma-mapping.h |  4 ++--
 include/linux/swiotlb.h |  8 +---
 kernel/dma/direct.c |  8 
 kernel/dma/direct.h |  8 
 kernel/dma/mapping.c|  4 ++--
 net/xdp/xsk_buff_pool.c |  2 +-
 8 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7bcdd1205535..7ef13198721b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -504,7 +504,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
 
__iommu_dma_unmap(dev, dma_addr, size);
 
-   if (unlikely(is_swiotlb_buffer(phys)))
+   if (unlikely(is_swiotlb_buffer(phys, size)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 
@@ -575,7 +575,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
}
 
iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-   if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+   if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys, org_size))
swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
 }
@@ -781,7 +781,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);
 
-   if (is_swiotlb_buffer(phys))
+   if (is_swiotlb_buffer(phys, size))
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 
@@ -794,7 +794,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
 
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-   if (is_swiotlb_buffer(phys))
+   if (is_swiotlb_buffer(phys, size))
swiotlb_sync_single_for_device(dev, phys, size, dir);
 
if (!dev_is_dma_coherent(dev))
@@ -815,7 +815,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 
-   if (is_swiotlb_buffer(sg_phys(sg)))
+   if (is_swiotlb_buffer(sg_phys(sg), sg->length))
swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
sg->length, dir);
}
@@ -832,10 +832,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
 
for_each_sg(sgl, sg, nelems, i) {
-   if (is_swiotlb_buffer(sg_phys(sg)))
+   if (is_swiotlb_buffer(sg_phys(sg), sg->length))
swiotlb_sync_single_for_device(dev, sg_phys(sg),
   sg->length, dir);
-
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 24d11861ac7d..333846af8d35 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -89,7 +89,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
return 0;
 }
 
-static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
+static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr,
+size_t size)
 {
unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
unsigned long xen_pfn = bfn_to_local_pfn(bfn);
@@ -100,7 +101,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 * in our domain. Therefore _only_ check address within our domain.
 */
if (pfn_valid(PFN_DOWN(paddr)))
-   return is_swiotlb_buffer(paddr);
+   return is_swiotlb_buffer(paddr, size);
return 0;
 }
 
@@ -431,7 +432,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
}
 
/* NOTE: We use dev_addr here, not paddr! */
-   if (is_xen_swiotlb_buffer(hwdev, dev_addr))
+   if (is_xen_swiotlb_buffer(hwdev, dev_addr, size))
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 }
 
@@ -448,7 +449,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
}
 
-   if (is_xen_swiotlb_buffer(dev, dma_addr))
+   if (is_xen

Re: [PATCH v1 5/8] dma: Use size for swiotlb boundary checks

2021-06-02 Thread Konrad Rzeszutek Wilk
On Wed, Jun 02, 2021 at 05:41:30PM -0700, Andi Kleen wrote:
> swiotlb currently only uses the start address of a DMA to check if something
> is in the swiotlb or not. But with virtio and untrusted hosts the host
> could give some DMA mapping that crosses the swiotlb boundaries,
> potentially leaking or corrupting data. Add size checks to all the swiotlb
> checks and reject any DMAs that cross the swiotlb buffer boundaries.

I seem to be only CC-ed on this and #7, so please bear with me.

But could you explain to me why please:

commit daf9514fd5eb098d7d6f3a1247cb8cc48fc94155 (swiotlb/stable/for-linus-5.12)
Author: Martin Radev 
Date:   Tue Jan 12 16:07:29 2021 +0100

swiotlb: Validate bounce size in the sync/unmap path

does not solve the problem as well?

> 
> Signed-off-by: Andi Kleen 
> ---
>  drivers/iommu/dma-iommu.c   | 13 ++---
>  drivers/xen/swiotlb-xen.c   | 11 ++-
>  include/linux/dma-mapping.h |  4 ++--
>  include/linux/swiotlb.h |  8 +---
>  kernel/dma/direct.c |  8 
>  kernel/dma/direct.h |  8 
>  kernel/dma/mapping.c|  4 ++--
>  net/xdp/xsk_buff_pool.c |  2 +-
>  8 files changed, 30 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 7bcdd1205535..7ef13198721b 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -504,7 +504,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
>  
>   __iommu_dma_unmap(dev, dma_addr, size);
>  
> - if (unlikely(is_swiotlb_buffer(phys)))
> + if (unlikely(is_swiotlb_buffer(phys, size)))
>   swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
>  }
>  
> @@ -575,7 +575,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
>   }
>  
>   iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
> - if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
> + if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys, org_size))
>   swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
>   return iova;
>  }
> @@ -781,7 +781,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
>   if (!dev_is_dma_coherent(dev))
>   arch_sync_dma_for_cpu(phys, size, dir);
>  
> - if (is_swiotlb_buffer(phys))
> + if (is_swiotlb_buffer(phys, size))
>   swiotlb_sync_single_for_cpu(dev, phys, size, dir);
>  }
>  
> @@ -794,7 +794,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
>   return;
>  
>   phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
> - if (is_swiotlb_buffer(phys))
> + if (is_swiotlb_buffer(phys, size))
>   swiotlb_sync_single_for_device(dev, phys, size, dir);
>  
>   if (!dev_is_dma_coherent(dev))
> @@ -815,7 +815,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
>   if (!dev_is_dma_coherent(dev))
>   arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
>  
> - if (is_swiotlb_buffer(sg_phys(sg)))
> + if (is_swiotlb_buffer(sg_phys(sg), sg->length))
>   swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
>   sg->length, dir);
>   }
> @@ -832,10 +832,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
>   return;
>  
>   for_each_sg(sgl, sg, nelems, i) {
> - if (is_swiotlb_buffer(sg_phys(sg)))
> + if (is_swiotlb_buffer(sg_phys(sg), sg->length))
>   swiotlb_sync_single_for_device(dev, sg_phys(sg),
>  sg->length, dir);
> -
>   if (!dev_is_dma_coherent(dev))
>   arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
>   }
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index 24d11861ac7d..333846af8d35 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -89,7 +89,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
>   return 0;
>  }
>  
> -static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
> +static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr,
> +  size_t size)
>  {
>   unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
>   unsigned long xen_pfn = bfn_to_local_pfn(bfn);
> @@ -100,7 +101,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
>* in our domain. Therefore _only_ check address within our domain.
>*/
>   if (pfn_valid(PFN_DOWN(paddr)))
> - return is_swiotlb_buffer(paddr);
> + return is_swiotlb_buffer(paddr, size);
>   return 0;
>  }
>  
> @@ -431,7 +432,7 @@ static void xen_swiotlb_unmap_page(struct device