[PATCH v6 01/19] arm: make SWIOTLB available

2013-09-27 Thread Stefano Stabellini
IOMMU_HELPER is needed because SWIOTLB calls iommu_is_span_boundary,
provided by lib/iommu_helper.c.

Signed-off-by: Stefano Stabellini 
Reviewed-by: Konrad Rzeszutek Wilk 
CC: will.dea...@arm.com
CC: li...@arm.linux.org.uk


Changes in v6:
- check for dev->dma_mask being NULL in dma_capable.

Changes in v5:
- implement dma_mark_clean using dmac_flush_range.

Changes in v3:
- dma_capable: do not treat dma_mask as a limit;
- remove SWIOTLB dependency on NEED_SG_DMA_LENGTH.
---
 arch/arm/Kconfig   |6 +
 arch/arm/include/asm/dma-mapping.h |   37 
 2 files changed, 43 insertions(+), 0 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e0..c0bfb33 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1832,6 +1832,12 @@ config CC_STACKPROTECTOR
  neutralized via a kernel panic.
  This feature requires gcc version 4.2 or above.
 
+config SWIOTLB
+   def_bool y
+
+config IOMMU_HELPER
+   def_bool SWIOTLB
+
 config XEN_DOM0
def_bool y
depends on XEN
diff --git a/arch/arm/include/asm/dma-mapping.h 
b/arch/arm/include/asm/dma-mapping.h
index 5b579b9..8807124 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -10,6 +10,7 @@
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
+#include <asm/cacheflush.h>
 
 #define DMA_ERROR_CODE (~0)
 extern struct dma_map_ops arm_dma_ops;
@@ -86,6 +87,42 @@ static inline dma_addr_t virt_to_dma(struct device *dev, 
void *addr)
 }
 #endif
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+   unsigned int offset = paddr & ~PAGE_MASK;
+   return pfn_to_dma(dev, paddr >> PAGE_SHIFT) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+   unsigned int offset = dev_addr & ~PAGE_MASK;
+   return (dma_to_pfn(dev, dev_addr) << PAGE_SHIFT) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t 
size)
+{
+   u64 limit, mask;
+   
+   if (!dev->dma_mask)
+   return 0;
+
+   mask = *dev->dma_mask;
+
+   limit = (mask + 1) & ~mask;
+   if (limit && size > limit)
+   return 0;
+
+   if ((addr | (addr + size - 1)) & ~mask)
+   return 0;
+
+   return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size)
+{
+   dmac_flush_range(addr, addr + size);
+}
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-- 
1.7.2.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH v6 01/19] arm: make SWIOTLB available

2013-09-27 Thread Stefano Stabellini
IOMMU_HELPER is needed because SWIOTLB calls iommu_is_span_boundary,
provided by lib/iommu_helper.c.

Signed-off-by: Stefano Stabellini stefano.stabell...@eu.citrix.com
Reviewed-by: Konrad Rzeszutek Wilk konrad.w...@oracle.com
CC: will.dea...@arm.com
CC: li...@arm.linux.org.uk


Changes in v6:
- check for dev->dma_mask being NULL in dma_capable.

Changes in v5:
- implement dma_mark_clean using dmac_flush_range.

Changes in v3:
- dma_capable: do not treat dma_mask as a limit;
- remove SWIOTLB dependency on NEED_SG_DMA_LENGTH.
---
 arch/arm/Kconfig   |6 +
 arch/arm/include/asm/dma-mapping.h |   37 
 2 files changed, 43 insertions(+), 0 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e0..c0bfb33 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1832,6 +1832,12 @@ config CC_STACKPROTECTOR
  neutralized via a kernel panic.
  This feature requires gcc version 4.2 or above.
 
+config SWIOTLB
+   def_bool y
+
+config IOMMU_HELPER
+   def_bool SWIOTLB
+
 config XEN_DOM0
def_bool y
depends on XEN
diff --git a/arch/arm/include/asm/dma-mapping.h 
b/arch/arm/include/asm/dma-mapping.h
index 5b579b9..8807124 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -10,6 +10,7 @@
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
+#include <asm/cacheflush.h>
 
 #define DMA_ERROR_CODE (~0)
 extern struct dma_map_ops arm_dma_ops;
@@ -86,6 +87,42 @@ static inline dma_addr_t virt_to_dma(struct device *dev, 
void *addr)
 }
 #endif
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+   unsigned int offset = paddr & ~PAGE_MASK;
+   return pfn_to_dma(dev, paddr >> PAGE_SHIFT) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+   unsigned int offset = dev_addr & ~PAGE_MASK;
+   return (dma_to_pfn(dev, dev_addr) << PAGE_SHIFT) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t 
size)
+{
+   u64 limit, mask;
+   
+   if (!dev->dma_mask)
+   return 0;
+
+   mask = *dev->dma_mask;
+
+   limit = (mask + 1) & ~mask;
+   if (limit && size > limit)
+   return 0;
+
+   if ((addr | (addr + size - 1)) & ~mask)
+   return 0;
+
+   return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size)
+{
+   dmac_flush_range(addr, addr + size);
+}
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-- 
1.7.2.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/