Re: [Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance

2019-09-09 Thread Stefano Stabellini
On Thu, 5 Sep 2019, Christoph Hellwig wrote:
> Copy the arm64 code that uses the dma-direct/swiotlb helpers for DMA
> non-coherent devices.
> 
> Signed-off-by: Christoph Hellwig 

This is much better and much more readable.

Reviewed-by: Stefano Stabellini 

> ---
>  arch/arm/include/asm/device.h|  3 -
>  arch/arm/include/asm/xen/page-coherent.h | 72 +---
>  arch/arm/mm/dma-mapping.c|  8 +--
>  drivers/xen/swiotlb-xen.c| 20 ---
>  4 files changed, 28 insertions(+), 75 deletions(-)
> 
> diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
> index f6955b55c544..c675bc0d5aa8 100644
> --- a/arch/arm/include/asm/device.h
> +++ b/arch/arm/include/asm/device.h
> @@ -14,9 +14,6 @@ struct dev_archdata {
>  #endif
>  #ifdef CONFIG_ARM_DMA_USE_IOMMU
>   struct dma_iommu_mapping*mapping;
> -#endif
> -#ifdef CONFIG_XEN
> - const struct dma_map_ops *dev_dma_ops;
>  #endif
>   unsigned int dma_coherent:1;
>   unsigned int dma_ops_setup:1;
> diff --git a/arch/arm/include/asm/xen/page-coherent.h 
> b/arch/arm/include/asm/xen/page-coherent.h
> index 2c403e7c782d..602ac02f154c 100644
> --- a/arch/arm/include/asm/xen/page-coherent.h
> +++ b/arch/arm/include/asm/xen/page-coherent.h
> @@ -6,23 +6,37 @@
>  #include 
>  #include 
>  
> -static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
> -{
> - if (dev && dev->archdata.dev_dma_ops)
> - return dev->archdata.dev_dma_ops;
> - return get_arch_dma_ops(NULL);
> -}
> -
>  static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t 
> size,
>   dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
>  {
> - return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, 
> attrs);
> + return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
>  }
>  
>  static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
>   void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
>  {
> - xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
> + dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
> +}
> +
> +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
> + dma_addr_t handle, size_t size, enum dma_data_direction dir)
> +{
> + unsigned long pfn = PFN_DOWN(handle);
> +
> + if (pfn_valid(pfn))
> + dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
> + else
> + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
> +}
> +
> +static inline void xen_dma_sync_single_for_device(struct device *hwdev,
> + dma_addr_t handle, size_t size, enum dma_data_direction dir)
> +{
> + unsigned long pfn = PFN_DOWN(handle);
> + if (pfn_valid(pfn))
> + dma_direct_sync_single_for_device(hwdev, handle, size, dir);
> + else
> + __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
>  }
>  
>  static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
> @@ -36,17 +50,8 @@ static inline void xen_dma_map_page(struct device *hwdev, 
> struct page *page,
>   bool local = (page_pfn <= dev_pfn) &&
>   (dev_pfn - page_pfn < compound_pages);
>  
> - /*
> -  * Dom0 is mapped 1:1, while the Linux page can span across
> -  * multiple Xen pages, it's not possible for it to contain a
> -  * mix of local and foreign Xen pages. So if the first xen_pfn
> -  * == mfn the page is local otherwise it's a foreign page
> -  * grant-mapped in dom0. If the page is local we can safely
> -  * call the native dma_ops function, otherwise we call the xen
> -  * specific function.
> -  */
>   if (local)
> - xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, 
> dir, attrs);
> + dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
>   else
>   __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, 
> attrs);
>  }
> @@ -63,33 +68,10 @@ static inline void xen_dma_unmap_page(struct device 
> *hwdev, dma_addr_t handle,
>* safely call the native dma_ops function, otherwise we call the xen
>* specific function.
>*/
> - if (pfn_valid(pfn)) {
> - if (xen_get_dma_ops(hwdev)->unmap_page)
> - xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, 
> dir, attrs);
> - } else
> + if (pfn_valid(pfn))
> + dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
> + else
>   __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
>  }
>  
> -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
> - dma_addr_t handle, size_t size, enum dma_data_direction dir)
> -{
> - unsigned long pfn = PFN_DOWN(handle);
> - if (pfn_valid(pfn)) {
> - if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
> -
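
For context when skimming the thread: the net effect of the hunks above is to drop the xen_get_dma_ops() indirection and dispatch on pfn_valid() instead, going through dma-direct for local dom0 pages and through the Xen-specific helpers for foreign grant-mapped pages. Below is a minimal userspace model of that dispatch; every helper is a stub standing in for the kernel/Xen function of the same name, so this is an illustrative sketch only, not kernel code.

/* Userspace model of the pfn_valid()-based dispatch introduced by the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_DOWN(addr) ((addr) >> PAGE_SHIFT)

/* Stub: pretend pfns below 0x80000 are RAM known to Linux (local dom0 pages). */
static bool pfn_valid(unsigned long pfn) { return pfn < 0x80000; }

static void dma_direct_sync_single_for_cpu(uint64_t handle, size_t size)
{
	printf("dma-direct cache maintenance for %#llx (%zu bytes)\n",
	       (unsigned long long)handle, size);
}

static void __xen_dma_sync_single_for_cpu(uint64_t handle, size_t size)
{
	printf("Xen-specific maintenance for foreign page %#llx (%zu bytes)\n",
	       (unsigned long long)handle, size);
}

static void xen_dma_sync_single_for_cpu(uint64_t handle, size_t size)
{
	unsigned long pfn = PFN_DOWN(handle);

	/* Local page: the generic dma-direct path can maintain it.
	 * Foreign (grant-mapped) page: fall back to the Xen helper. */
	if (pfn_valid(pfn))
		dma_direct_sync_single_for_cpu(handle, size);
	else
		__xen_dma_sync_single_for_cpu(handle, size);
}

int main(void)
{
	xen_dma_sync_single_for_cpu(0x1000, 64);      /* local */
	xen_dma_sync_single_for_cpu(0x900000000, 64); /* foreign */
	return 0;
}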

[Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance

2019-09-05 Thread Christoph Hellwig
Copy the arm64 code that uses the dma-direct/swiotlb helpers for DMA
non-coherent devices.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/include/asm/device.h|  3 -
 arch/arm/include/asm/xen/page-coherent.h | 72 +---
 arch/arm/mm/dma-mapping.c|  8 +--
 drivers/xen/swiotlb-xen.c| 20 ---
 4 files changed, 28 insertions(+), 75 deletions(-)

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index f6955b55c544..c675bc0d5aa8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -14,9 +14,6 @@ struct dev_archdata {
 #endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping*mapping;
-#endif
-#ifdef CONFIG_XEN
-   const struct dma_map_ops *dev_dma_ops;
 #endif
unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1;
diff --git a/arch/arm/include/asm/xen/page-coherent.h 
b/arch/arm/include/asm/xen/page-coherent.h
index 2c403e7c782d..602ac02f154c 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -6,23 +6,37 @@
 #include 
 #include 
 
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-   if (dev && dev->archdata.dev_dma_ops)
-   return dev->archdata.dev_dma_ops;
-   return get_arch_dma_ops(NULL);
-}
-
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
-   return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, 
attrs);
+   return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
 }
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
-   xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+   dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+   dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+   unsigned long pfn = PFN_DOWN(handle);
+
+   if (pfn_valid(pfn))
+   dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+   else
+   __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+   dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+   unsigned long pfn = PFN_DOWN(handle);
+   if (pfn_valid(pfn))
+   dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+   else
+   __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
@@ -36,17 +50,8 @@ static inline void xen_dma_map_page(struct device *hwdev, 
struct page *page,
bool local = (page_pfn <= dev_pfn) &&
(dev_pfn - page_pfn < compound_pages);
 
-   /*
-* Dom0 is mapped 1:1, while the Linux page can span across
-* multiple Xen pages, it's not possible for it to contain a
-* mix of local and foreign Xen pages. So if the first xen_pfn
-* == mfn the page is local otherwise it's a foreign page
-* grant-mapped in dom0. If the page is local we can safely
-* call the native dma_ops function, otherwise we call the xen
-* specific function.
-*/
if (local)
-   xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, 
dir, attrs);
+   dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
else
__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, 
attrs);
 }
@@ -63,33 +68,10 @@ static inline void xen_dma_unmap_page(struct device *hwdev, 
dma_addr_t handle,
 * safely call the native dma_ops function, otherwise we call the xen
 * specific function.
 */
-   if (pfn_valid(pfn)) {
-   if (xen_get_dma_ops(hwdev)->unmap_page)
-   xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, 
dir, attrs);
-   } else
+   if (pfn_valid(pfn))
+   dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+   else
__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
 }
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-   dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-   unsigned long pfn = PFN_DOWN(handle);
-   if (pfn_valid(pfn)) {
-   if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-   xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, 
handle, size, dir);
-   } else
-   __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-

Re: [Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance

2019-08-29 Thread Stefano Stabellini
On Mon, 26 Aug 2019, Christoph Hellwig wrote:
> Reuse the arm64 code that uses the dma-direct/swiotlb helpers for DMA
> non-coherent devices.

This patch does a bunch of things not listed in the commit message, such
as moving the static inline functions to include/xen/arm/page-coherent.h
and removing xen_swiotlb_dma_mmap and xen_swiotlb_get_sgtable because
they are unnecessary.

I would prefer if they were separate patches (for bisectability). It's
OK if you want to keep it all in one patch, but please list all the
changes in the commit message.

In any case, I looked at the patch in detail and it does all the right
things -- it's correct.


> Signed-off-by: Christoph Hellwig 
> ---
>  arch/arm/include/asm/device.h  |  3 -
>  arch/arm/include/asm/xen/page-coherent.h   | 93 --
>  arch/arm/mm/dma-mapping.c  |  8 +-
>  arch/arm64/include/asm/xen/page-coherent.h | 75 -
>  drivers/xen/swiotlb-xen.c  | 49 +---
>  include/xen/arm/page-coherent.h| 80 +++
>  6 files changed, 83 insertions(+), 225 deletions(-)
> 
> diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
> index f6955b55c544..c675bc0d5aa8 100644
> --- a/arch/arm/include/asm/device.h
> +++ b/arch/arm/include/asm/device.h
> @@ -14,9 +14,6 @@ struct dev_archdata {
>  #endif
>  #ifdef CONFIG_ARM_DMA_USE_IOMMU
>   struct dma_iommu_mapping*mapping;
> -#endif
> -#ifdef CONFIG_XEN
> - const struct dma_map_ops *dev_dma_ops;
>  #endif
>   unsigned int dma_coherent:1;
>   unsigned int dma_ops_setup:1;
> diff --git a/arch/arm/include/asm/xen/page-coherent.h 
> b/arch/arm/include/asm/xen/page-coherent.h
> index 2c403e7c782d..27e984977402 100644
> --- a/arch/arm/include/asm/xen/page-coherent.h
> +++ b/arch/arm/include/asm/xen/page-coherent.h
> @@ -1,95 +1,2 @@
>  /* SPDX-License-Identifier: GPL-2.0 */
> -#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
> -#define _ASM_ARM_XEN_PAGE_COHERENT_H
> -
> -#include 
> -#include 
>  #include 
> -
> -static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
> -{
> - if (dev && dev->archdata.dev_dma_ops)
> - return dev->archdata.dev_dma_ops;
> - return get_arch_dma_ops(NULL);
> -}
> -
> -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t 
> size,
> - dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
> -{
> - return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, 
> attrs);
> -}
> -
> -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
> - void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
> -{
> - xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
> -}
> -
> -static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
> -  dma_addr_t dev_addr, unsigned long offset, size_t size,
> -  enum dma_data_direction dir, unsigned long attrs)
> -{
> - unsigned long page_pfn = page_to_xen_pfn(page);
> - unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
> - unsigned long compound_pages =
> - (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
> - bool local = (page_pfn <= dev_pfn) &&
> - (dev_pfn - page_pfn < compound_pages);
> -
> - /*
> -  * Dom0 is mapped 1:1, while the Linux page can span across
> -  * multiple Xen pages, it's not possible for it to contain a
> -  * mix of local and foreign Xen pages. So if the first xen_pfn
> -  * == mfn the page is local otherwise it's a foreign page
> -  * grant-mapped in dom0. If the page is local we can safely
> -  * call the native dma_ops function, otherwise we call the xen
> -  * specific function.
> -  */
> - if (local)
> - xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, 
> dir, attrs);
> - else
> - __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, 
> attrs);
> -}
> -
> -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t 
> handle,
> - size_t size, enum dma_data_direction dir, unsigned long attrs)
> -{
> - unsigned long pfn = PFN_DOWN(handle);
> - /*
> -  * Dom0 is mapped 1:1, while the Linux page can be spanned accross
> -  * multiple Xen page, it's not possible to have a mix of local and
> -  * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
> -  * foreign mfn will always return false. If the page is local we can
> -  * safely call the native dma_ops function, otherwise we call the xen
> -  * specific function.
> -  */
> - if (pfn_valid(pfn)) {
> - if (xen_get_dma_ops(hwdev)->unmap_page)
> - xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, 
> dir, attrs);
> - } else
> - __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
> -}
> -
> -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
> - dma_addr_t handle, 

[Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance

2019-08-26 Thread Christoph Hellwig
Reuse the arm64 code that uses the dma-direct/swiotlb helpers for DMA
non-coherent devices.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/include/asm/device.h  |  3 -
 arch/arm/include/asm/xen/page-coherent.h   | 93 --
 arch/arm/mm/dma-mapping.c  |  8 +-
 arch/arm64/include/asm/xen/page-coherent.h | 75 -
 drivers/xen/swiotlb-xen.c  | 49 +---
 include/xen/arm/page-coherent.h| 80 +++
 6 files changed, 83 insertions(+), 225 deletions(-)

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index f6955b55c544..c675bc0d5aa8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -14,9 +14,6 @@ struct dev_archdata {
 #endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping*mapping;
-#endif
-#ifdef CONFIG_XEN
-   const struct dma_map_ops *dev_dma_ops;
 #endif
unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1;
diff --git a/arch/arm/include/asm/xen/page-coherent.h 
b/arch/arm/include/asm/xen/page-coherent.h
index 2c403e7c782d..27e984977402 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,95 +1,2 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include 
-#include 
 #include 
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-   if (dev && dev->archdata.dev_dma_ops)
-   return dev->archdata.dev_dma_ops;
-   return get_arch_dma_ops(NULL);
-}
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-   dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-   return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, 
attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-   void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-   xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-dma_addr_t dev_addr, unsigned long offset, size_t size,
-enum dma_data_direction dir, unsigned long attrs)
-{
-   unsigned long page_pfn = page_to_xen_pfn(page);
-   unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-   unsigned long compound_pages =
-   (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-   bool local = (page_pfn <= dev_pfn) &&
-   (dev_pfn - page_pfn < compound_pages);
-
-   /*
-* Dom0 is mapped 1:1, while the Linux page can span across
-* multiple Xen pages, it's not possible for it to contain a
-* mix of local and foreign Xen pages. So if the first xen_pfn
-* == mfn the page is local otherwise it's a foreign page
-* grant-mapped in dom0. If the page is local we can safely
-* call the native dma_ops function, otherwise we call the xen
-* specific function.
-*/
-   if (local)
-   xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-   else
-   __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-   size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-   unsigned long pfn = PFN_DOWN(handle);
-   /*
-* Dom0 is mapped 1:1, while the Linux page can be spanned accross
-* multiple Xen page, it's not possible to have a mix of local and
-* foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-* foreign mfn will always return false. If the page is local we can
-* safely call the native dma_ops function, otherwise we call the xen
-* specific function.
-*/
-   if (pfn_valid(pfn)) {
-   if (xen_get_dma_ops(hwdev)->unmap_page)
-   xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, 
dir, attrs);
-   } else
-   __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-   dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-   unsigned long pfn = PFN_DOWN(handle);
-   if (pfn_valid(pfn)) {
-   if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-   xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, 
handle, size, dir);
-   } else
-   __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-   dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-   unsigned long pfn = PFN_DOWN(handle);
-   if (pfn_valid(pfn)) {
-   if (xen_get_dma_ops(hwdev)->sync_single_for_device)
-   xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, 
handle, size, dir);
-   } else
-   __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index d42557ee69c2..738097396445 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1132,10 +1132,6 @@ static const struct dma_map_ops 
*arm_get_dma_map_ops(bool coherent)
 * 32-bit DMA.
 * Use the generic dma-direct / swiotlb ops code in that case, as that
 * handles 

Re: [Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance

2019-08-26 Thread Christoph Hellwig
On Mon, Aug 19, 2019 at 12:45:17PM +0100, Julien Grall wrote:
> On 8/16/19 2:00 PM, Christoph Hellwig wrote:
>> +static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
>> + dma_addr_t dev_addr, unsigned long offset, size_t size,
>> + enum dma_data_direction dir, unsigned long attrs)
>> +{
>> +unsigned long page_pfn = page_to_xen_pfn(page);
>> +unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
>> +unsigned long compound_pages =
>> +(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
>> +bool local = (page_pfn <= dev_pfn) &&
>> +(dev_pfn - page_pfn < compound_pages);
>> +
>
> The Arm version has a comment here. Could we retain it?

I've added it in this patch, although the rewrites later on mean it will
go away in favour of a new comment elsewhere anyway.
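
The comment under discussion documents why comparing page_pfn and dev_pfn is enough to tell a local dom0 page from a foreign grant-mapped one: dom0 is mapped 1:1, so the device address of a local page falls inside the pfn range of the Linux page, while a foreign mfn does not. A small self-contained sketch of that arithmetic follows; it is a plain-C model with the kernel helpers reduced to the pfn values they would produce, not the kernel code itself.

/* Model of the "is this page local to dom0?" check quoted above.
 * page_pfn: first Xen pfn of the Linux page (as from page_to_xen_pfn()).
 * dev_pfn:  Xen pfn of the DMA address given to the device (XEN_PFN_DOWN()).
 * compound_pages: number of Xen pfns covered by the (possibly compound) page. */
#include <stdbool.h>
#include <stdio.h>

static bool xen_page_is_local(unsigned long page_pfn, unsigned long dev_pfn,
			      unsigned long compound_pages)
{
	/* Local iff dev_pfn lies in [page_pfn, page_pfn + compound_pages). */
	return (page_pfn <= dev_pfn) && (dev_pfn - page_pfn < compound_pages);
}

int main(void)
{
	/* 4 KiB page covering one Xen pfn: local only when the pfns match. */
	printf("local: %d\n", xen_page_is_local(0x1234, 0x1234, 1));    /* 1 */
	printf("foreign: %d\n", xen_page_is_local(0x1234, 0x98765, 1)); /* 0 */
	return 0;
}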


Re: [Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance

2019-08-19 Thread Julien Grall

Hi Christoph,

On 8/16/19 2:00 PM, Christoph Hellwig wrote:

+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+dma_addr_t dev_addr, unsigned long offset, size_t size,
+enum dma_data_direction dir, unsigned long attrs)
+{
+   unsigned long page_pfn = page_to_xen_pfn(page);
+   unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+   unsigned long compound_pages =
+   (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+   bool local = (page_pfn <= dev_pfn) &&
+   (dev_pfn - page_pfn < compound_pages);
+

The Arm version has a comment here. Could we retain it?


+   if (local)
+   dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
+   else
+   __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, 
attrs);
+}
+


Cheers,

--
Julien Grall


[Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance

2019-08-16 Thread Christoph Hellwig
Reuse the arm64 code that uses the dma-direct/swiotlb helpers for DMA
non-coherent devices.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/Kconfig   |  4 +
 arch/arm/include/asm/device.h  |  3 -
 arch/arm/include/asm/xen/page-coherent.h   | 93 --
 arch/arm/mm/Kconfig|  4 -
 arch/arm/mm/dma-mapping.c  |  8 +-
 arch/arm64/include/asm/xen/page-coherent.h | 75 -
 drivers/xen/swiotlb-xen.c  | 49 +---
 include/xen/arm/page-coherent.h| 71 +
 8 files changed, 78 insertions(+), 229 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 33b00579beff..24360211534a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,6 +7,8 @@ config ARM
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
+   select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+   select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
+   select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
+   select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index f6955b55c544..c675bc0d5aa8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -14,9 +14,6 @@ struct dev_archdata {
 #endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping*mapping;
-#endif
-#ifdef CONFIG_XEN
-   const struct dma_map_ops *dev_dma_ops;
 #endif
unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1;
diff --git a/arch/arm/include/asm/xen/page-coherent.h 
b/arch/arm/include/asm/xen/page-coherent.h
index 2c403e7c782d..27e984977402 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,95 +1,2 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include 
-#include 
 #include 
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-   if (dev && dev->archdata.dev_dma_ops)
-   return dev->archdata.dev_dma_ops;
-   return get_arch_dma_ops(NULL);
-}
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-   dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-   return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, 
attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-   void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-   xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-dma_addr_t dev_addr, unsigned long offset, size_t size,
-enum dma_data_direction dir, unsigned long attrs)
-{
-   unsigned long page_pfn = page_to_xen_pfn(page);
-   unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-   unsigned long compound_pages =
-   (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-   bool local = (page_pfn <= dev_pfn) &&
-   (dev_pfn - page_pfn < compound_pages);
-
-   /*
-* Dom0 is mapped 1:1, while the Linux page can span across
-* multiple Xen pages, it's not possible for it to contain a
-* mix of local and foreign Xen pages. So if the first xen_pfn
-* == mfn the page is local otherwise it's a foreign page
-* grant-mapped in dom0. If the page is local we can safely
-* call the native dma_ops function, otherwise we call the xen
-* specific function.
-*/
-   if (local)
-   xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-   else
-   __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-   size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-   unsigned long pfn = PFN_DOWN(handle);
-   /*
-* Dom0 is mapped 1:1, while the Linux page can be spanned accross
-* multiple Xen page, it's not possible to have a mix of local and
-* foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-* foreign mfn will always return false. If the page is local we can
-* safely call the native dma_ops function, otherwise we call the xen
-* specific function.
-*/
-   if (pfn_valid(pfn)) {
-   if (xen_get_dma_ops(hwdev)->unmap_page)
-   xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, 
dir, attrs);
-   } else
-   __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-   dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-   unsigned long pfn = PFN_DOWN(handle);
-   if (pfn_valid(pfn)) {
-   if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-