On Fri, May 19, 2023 at 02:58:57PM +0200, Christoph Hellwig wrote:
> On Fri, May 19, 2023 at 01:49:46PM +0100, Andrew Cooper wrote:
> > > The alternative would be to finally merge swiotlb-xen into swiotlb, in
> > > which case we might be able to do this later.  Let me see what I can
> > > do there.
> > 
> > If that is an option, it would be great to reduce the special-casing.
> 
> I think it's doable, and I've been wanting it for a while.  I just
> need motivated testers, but it seems like I just found at least two :)

So looking at swiotlb-xen, it does these odd things where it takes a value
originally generated by xen_phys_to_dma, then does only a dma_to_phys
to go back and calls pfn_valid on the result.  Does this make sense, or
is it wrong and just happens to work by accident?  I.e. is the patch
below correct?

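For reference, the conversion helpers at the top of swiotlb-xen.c boil
down to roughly this (paraphrased from memory rather than copied, so
treat it as a sketch):

	/* guest phys -> machine/bus frame -> dma address */
	static inline dma_addr_t xen_phys_to_dma(struct device *dev,
			phys_addr_t paddr)
	{
		return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
	}

	/* full reverse: dma address -> bus address -> guest phys */
	static inline phys_addr_t xen_dma_to_phys(struct device *dev,
			dma_addr_t dma_addr)
	{
		return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
	}

so a bare dma_to_phys() only undoes the phys_to_dma() part and still
leaves a bus address, which is what makes the pfn_valid() calls below
look odd to me.
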

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 67aa74d201627d..3396c5766f0dd8 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -90,9 +90,7 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 
 static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 {
-       unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
-       unsigned long xen_pfn = bfn_to_local_pfn(bfn);
-       phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;
+       phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
@@ -234,7 +232,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 done:
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+               if (pfn_valid(PFN_DOWN(phys)))
                        arch_sync_dma_for_device(phys, size, dir);
                else
                        xen_dma_sync_for_device(dev, dev_addr, size, dir);
@@ -258,7 +256,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
        BUG_ON(dir == DMA_NONE);
 
        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+               if (pfn_valid(PFN_DOWN(paddr)))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
@@ -276,7 +274,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
        if (!dev_is_dma_coherent(dev)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+               if (pfn_valid(PFN_DOWN(paddr)))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
@@ -296,7 +294,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
        if (!dev_is_dma_coherent(dev)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+               if (pfn_valid(PFN_DOWN(paddr)))
                        arch_sync_dma_for_device(paddr, size, dir);
                else
                        xen_dma_sync_for_device(dev, dma_addr, size, dir);
