Although there are no known problems caused by this, it seems prudent to
invalidate the region of memory we are about to remap as
uncached. Additionally, this matches how dma_alloc_coherent() is
implemented on ARM.

Signed-off-by: Andrey Smirnov <andrew.smir...@gmail.com>
---
 arch/arm/cpu/mmu_64.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index a7186eda4..1ee6a3b8c 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -248,6 +248,14 @@ void mmu_disable(void)
        isb();
 }
 
+static void dma_inv_range(void *ptr, size_t size)
+{
+       unsigned long start = (unsigned long)ptr;
+       unsigned long end = start + size - 1;
+
+       v8_inv_dcache_range(start, end);
+}
+
 void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
 {
        void *ret;
@@ -257,6 +265,8 @@ void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
        if (dma_handle)
                *dma_handle = (dma_addr_t)ret;
 
+       dma_inv_range(ret, size);
+
        arch_remap_range(ret, size, MAP_UNCACHED);
 
        return ret;
-- 
2.20.1


_______________________________________________
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox

Reply via email to