Re: [PATCHv7 5/9] ARM: dma-mapping: implement dma sg methods on top of any generic dma ops

2012-03-26 Thread Subash Patel

Hi Marek,

As per our discussion over IRC, I would like to check with you on the 
feasibility of extending the DMA operation calls to coherent regions. 
You said that since a struct page won't be available for buffers in 
these regions, functions like arm_dma_map_sg() (below) will fail in 
their cache maintenance operations. In fact, I am hitting exactly this 
issue when I integrate dma-buf buffer sharing + the v4l2/vb2 exporter 
patch series + dma-mapping (v7) and use coherent memory.


So I would like to hear your opinion, and others', on extending the 
dma-mapping framework to scenarios that use device-coherent memory.
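
For illustration, the constraint described above can be sketched as follows. This is hypothetical code, not part of the patch set: it only spells out the check that the streaming sg path implicitly relies on, namely that every scatterlist entry is backed by a valid struct page before per-entry cache maintenance can be done.

#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical helper (illustration only): the streaming
 * arm_dma_map_sg() path performs CPU cache maintenance through
 * sg_page(), so it can only handle scatterlist entries that are
 * backed by a valid struct page. Buffers carved out of a
 * device-coherent region typically are not, which is why the
 * mapping fails for them.
 */
static bool example_sg_entry_has_struct_page(struct scatterlist *s)
{
	struct page *page = sg_page(s);

	return page && pfn_valid(page_to_pfn(page));
}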


Regards,
Subash

On 02/29/2012 08:34 PM, Marek Szyprowski wrote:

This patch converts all dma_sg methods to be generic (independent of the
current DMA mapping implementation for ARM architecture). All dma sg
operations are now implemented on top of respective
dma_map_page/dma_sync_single_for* operations from dma_map_ops structure.

Signed-off-by: Marek Szyprowski <m.szyprow...@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.p...@samsung.com>
---
  arch/arm/mm/dma-mapping.c |   43 +++
  1 files changed, 19 insertions(+), 24 deletions(-)
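
For context, the caller-visible API does not change with this conversion: a driver still maps a scatterlist through dma_map_sg()/dma_unmap_sg(), and the call is dispatched through the per-device dma_map_ops, which (as the hunks below show) loop over the list and invoke ops->map_page()/ops->unmap_page() per entry. A minimal, hypothetical caller-side sketch; the sg_table is assumed to be built elsewhere:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical example: map a prebuilt sg_table for a CPU-to-device transfer. */
static int example_map_for_device(struct device *dev, struct sg_table *sgt)
{
	int nents;

	/* Dispatches through get_dma_ops(dev), i.e. arm_dma_map_sg() on ARM. */
	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (nents == 0)
		return -ENOMEM;

	/* ... program the DMA engine with the mapped addresses ... */

	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	return 0;
}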

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cd5ed8d..a5a0b5b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -616,7 +616,7 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
  EXPORT_SYMBOL(___dma_page_dev_to_cpu);

  /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to map
@@ -634,12 +634,13 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
  int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, struct dma_attrs *attrs)
  {
+   struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;

for_each_sg(sg, s, nents, i) {
-   s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-   s->length, dir);
+   s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+   s->length, dir, attrs);
	if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
@@ -647,12 +648,12 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,

   bad_mapping:
for_each_sg(sg, s, i, j)
-   __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+   ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
return 0;
  }

  /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -664,15 +665,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, struct dma_attrs *attrs)
  {
+   struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
+
int i;

for_each_sg(sg, s, nents, i)
-   __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+   ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
  }

  /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to map (returned from dma_map_sg)
@@ -681,21 +684,17 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
  void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
  {
+   struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;

-   for_each_sg(sg, s, nents, i) {
-   if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
-   sg_dma_len(s), dir))
-   continue;
-
-   __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
-   }
+   for_each_sg(sg, s, nents, i)
+   ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+dir);
  }

  /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to map (returned 

RE: [PATCH v12 3/3] iommu/exynos: Add iommu driver for Exynos Platforms

2012-03-26 Thread Marek Szyprowski
Hello,

I'm sorry for the delay; I was quite busy recently, but I have finally
found some time to review the code.

On Thursday, March 15, 2012 9:33 AM Cho KyongHo wrote:

 This is the System MMU driver and IOMMU API implementation for
 Exynos SoC platforms. Exynos platforms have more than 10 System
 MMUs, each dedicated to a multimedia accelerator.
 
 The System MMU driver already exists in arch/arm/plat-s5p, but it
 is moved to drivers/iommu since Ohad Ben-Cohen gathered the IOMMU
 drivers there.
 
 Any device driver in Exynos platforms that needs to control its
 System MMU must call platform_set_sysmmu() to inform the System
 MMU driver which device will control it.
 platform_set_sysmmu() is defined in mach/sysmmu.h.
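
For reference, a hypothetical sketch of that binding step. The exact signature of platform_set_sysmmu() comes from mach/sysmmu.h in this series; the sketch assumes it takes the System MMU's struct device followed by the client device, and both platform devices below are placeholders:

#include <linux/platform_device.h>
#include <mach/sysmmu.h>

/*
 * Hypothetical machine-setup code: sysmmu_pdev and client_pdev are
 * placeholder platform devices, and the argument order is an
 * assumption based on the description above.
 */
static void example_bind_sysmmu(struct platform_device *sysmmu_pdev,
				struct platform_device *client_pdev)
{
	/* Tell the System MMU driver which device it translates for. */
	platform_set_sysmmu(&sysmmu_pdev->dev, &client_pdev->dev);
}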
 
 Cc: Joerg Roedel <joerg.roe...@amd.com>
 Cc: Kukjin Kim <kgene@samsung.com>
 Signed-off-by: KyongHo Cho <pullip@samsung.com>


(snipped)

 diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
 new file mode 100644
 index 000..b8daf7c
 --- /dev/null
 +++ b/drivers/iommu/exynos-iommu.c
 @@ -0,0 +1,1057 @@
 +/* linux/drivers/iommu/exynos_iommu.c
 + *
 + * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 + *   http://www.samsung.com
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 as
 + * published by the Free Software Foundation.
 + */

(snipped)

 +static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 +{
 + /* SYSMMU is in blocked state when the interrupt occurred. */
 + struct sysmmu_drvdata *data = dev_id;
 + struct resource *irqres;
 + struct platform_device *pdev;
 + enum exynos_sysmmu_inttype itype;
 + unsigned long addr = -1;
 +
 + int i, ret = -ENOSYS;
 +
 + read_lock(&data->lock);
 +
 + WARN_ON(!is_sysmmu_active(data));
 +
 + pdev = to_platform_device(data->sysmmu);
 + for (i = 0; i < pdev->num_resources; i++) {
 + irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
 + if (irqres && ((int)irqres->start == irq))
 + break;
 + }
 +
 + if (i == pdev->num_resources) {
 + itype = SYSMMU_FAULT_UNKNOWN;
 + } else {
 + i /= 2;
 +
 + itype = (enum exynos_sysmmu_inttype)
 + __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
 + if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
 + itype = SYSMMU_FAULT_UNKNOWN;
 + else
 + addr = __raw_readl(
 + data->sfrbases[i] + fault_reg_offset[itype]);
 + }
 +
 + if (data->domain)
 + ret = report_iommu_fault(data->domain, data->dev,
 + addr, itype);
 +
 + if ((ret == -ENOSYS) && data->fault_handler) {
 + unsigned long base = data->pgtable;
 + if (itype != SYSMMU_FAULT_UNKNOWN)
 + base = __raw_readl(
 + data->sfrbases[i] + REG_PT_BASE_ADDR);
 + ret = data->fault_handler(itype, base, addr);
 + }
 +
 + if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
 + __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
 + else
 + dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
 + data->dbgname, sysmmu_fault_name[itype]);
 +
 + if (itype != SYSMMU_FAULT_UNKNOWN)
 + sysmmu_unblock(data->sfrbases[i]);
 +
 + read_unlock(&data->lock);
 +
 + return IRQ_HANDLED;
 +}
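
For context, report_iommu_fault() in the handler above forwards the fault to whatever handler the owner of the IOMMU domain registered through the generic IOMMU API. A minimal sketch of such a registration, assuming the iommu_set_fault_handler() signature of this kernel generation (no token argument yet); the handler body is only a placeholder:

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Placeholder fault handler: returning 0 means "handled", so
 * exynos_sysmmu_irq() above clears the interrupt and unblocks the
 * System MMU; a non-zero return leaves the fault unhandled.
 */
static int example_iommu_fault(struct iommu_domain *domain,
			       struct device *dev,
			       unsigned long iova, int flags)
{
	dev_err(dev, "IOMMU fault at IOVA %#lx (flags %#x)\n", iova, flags);
	return 0;
}

static void example_register_fault_handler(struct iommu_domain *domain)
{
	iommu_set_fault_handler(domain, example_iommu_fault);
}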

(snipped)

 +static int exynos_sysmmu_probe(struct platform_device *pdev)
 +{
 + int i, ret;
 + struct device *dev;
 + struct sysmmu_drvdata *data;
 +
 + dev = &pdev->dev;
 +
 + data = kzalloc(sizeof(*data), GFP_KERNEL);
 + if (!data) {
 + dev_dbg(dev, "Not enough memory\n");
 + ret = -ENOMEM;
 + goto err_alloc;
 + }
 +
 + ret = dev_set_drvdata(dev, data);
 + if (ret) {
 + dev_dbg(dev, "Unabled to initialize driver data\n");
 + goto err_init;
 + }
 +
 + data->nsfrs = pdev->num_resources / 2;
 + data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
 + GFP_KERNEL);
 + if (data->sfrbases == NULL) {
 + dev_dbg(dev, "Not enough memory\n");
 + ret = -ENOMEM;
 + goto err_init;
 + }
 +
 + for (i = 0; i < data->nsfrs; i++) {
 + struct resource *res;
 + res = platform_get_resource(pdev, IORESOURCE_MEM, i);
 + if (!res) {
 + dev_dbg(dev, "Unable to find IOMEM region\n");
 + ret = -ENOENT;
 + goto err_res;
 + }
 +
 + data->sfrbases[i] = ioremap(res->start, resource_size(res));
 + if (!data->sfrbases[i]) {
 + dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
 +