Re: [PATCH 11/12] dma-mapping: move dma_common_{mmap,get_sgtable} out of mapping.c

2020-09-11 Thread Christoph Hellwig
On Thu, Sep 10, 2020 at 02:34:18PM +0100, Robin Murphy wrote:
> On 2020-09-08 17:47, Christoph Hellwig wrote:
>> Add a new file that contains helpera for misc DMA ops, which is only
>
> The Latin plural of the singular "helperum", I guess? :P

I've switched it to helpers, that might be easier to understand :)


Re: [PATCH 11/12] dma-mapping: move dma_common_{mmap,get_sgtable} out of mapping.c

2020-09-10 Thread Robin Murphy

On 2020-09-08 17:47, Christoph Hellwig wrote:
> Add a new file that contains helpera for misc DMA ops, which is only

The Latin plural of the singular "helperum", I guess? :P

> built when CONFIG_DMA_OPS is set.

Reviewed-by: Robin Murphy 

[PATCH 11/12] dma-mapping: move dma_common_{mmap,get_sgtable} out of mapping.c

2020-09-08 Thread Christoph Hellwig
Add a new file that contains helpera for misc DMA ops, which is only
built when CONFIG_DMA_OPS is set.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/Makefile  |  1 +
 kernel/dma/mapping.c | 47 +---
 kernel/dma/ops_helpers.c | 51 
 3 files changed, 53 insertions(+), 46 deletions(-)
 create mode 100644 kernel/dma/ops_helpers.c

diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 32c7c1942bbd6c..dc755ab68aabf9 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_HAS_DMA)  += mapping.o direct.o
+obj-$(CONFIG_DMA_OPS)  += ops_helpers.o
 obj-$(CONFIG_DMA_OPS)  += dummy.o
 obj-$(CONFIG_DMA_CMA)  += contiguous.o
 obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 0d129421e75fc8..848c95c27d79ff 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -8,7 +8,7 @@
 #include  /* for max_pfn */
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -295,22 +295,6 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-/*
- * Create scatter-list for the already allocated DMA buffer.
- */
-int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-void *cpu_addr, dma_addr_t dma_addr, size_t size,
-unsigned long attrs)
-{
-   struct page *page = virt_to_page(cpu_addr);
-   int ret;
-
-   ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-   if (!ret)
-   sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-   return ret;
-}
-
 /*
  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
  * that the intention is to allow exporting memory allocated via the
@@ -358,35 +342,6 @@ pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 }
 #endif /* CONFIG_MMU */
 
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-   void *cpu_addr, dma_addr_t dma_addr, size_t size,
-   unsigned long attrs)
-{
-#ifdef CONFIG_MMU
-   unsigned long user_count = vma_pages(vma);
-   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   unsigned long off = vma->vm_pgoff;
-   int ret = -ENXIO;
-
-   vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
-
-   if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-   return ret;
-
-   if (off >= count || user_count > count - off)
-   return -ENXIO;
-
-   return remap_pfn_range(vma, vma->vm_start,
-   page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
-   user_count << PAGE_SHIFT, vma->vm_page_prot);
-#else
-   return -ENXIO;
-#endif /* CONFIG_MMU */
-}
-
 /**
  * dma_can_mmap - check if a given device supports dma_mmap_*
  * @dev: device to check
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
new file mode 100644
index 00..e443c69be4299f
--- /dev/null
+++ b/kernel/dma/ops_helpers.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for DMA ops implementations.  These generally rely on the fact that
+ * the allocated memory contains normal pages in the direct kernel mapping.
+ */
+#include 
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+void *cpu_addr, dma_addr_t dma_addr, size_t size,
+unsigned long attrs)
+{
+   struct page *page = virt_to_page(cpu_addr);
+   int ret;
+
+   ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+   if (!ret)
+   sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+   return ret;
+}
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+   void *cpu_addr, dma_addr_t dma_addr, size_t size,
+   unsigned long attrs)
+{
+#ifdef CONFIG_MMU
+   unsigned long user_count = vma_pages(vma);
+   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+   unsigned long off = vma->vm_pgoff;
+   int ret = -ENXIO;
+
+   vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+   if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+   return ret;
+
+   if (off >= count || user_count > count - off)
+   return -ENXIO;
+
+   return remap_pfn_range(vma, vma->vm_start,
+   page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+   user_count << PAGE_SHIFT, vma->vm_page_prot);
+#else
+   return -ENXIO;
+#endif /* CONFIG_MMU */
+}
-- 
2.28.0
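
For context, a minimal sketch (not part of the patch) of how a DMA ops backend whose buffers are ordinary pages in the kernel direct mapping might reuse the relocated helpers, rather than duplicating the mmap/sgtable logic per implementation. "example_dma_ops" is a made-up name, and the header that declares struct dma_map_ops and these helper prototypes differs between kernel versions:

#include <linux/dma-mapping.h>	/* struct dma_map_ops; helper prototypes may
					 * live here or in a split-out header,
					 * depending on kernel version */

static const struct dma_map_ops example_dma_ops = {
	/* .alloc, .free, .map_page, etc. omitted from this sketch */
	.mmap		= dma_common_mmap,		/* in kernel/dma/ops_helpers.c after this patch */
	.get_sgtable	= dma_common_get_sgtable,
};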