The arch_sync_dma_for_cpu()/arch_sync_dma_for_device() implementation is
broken: some direction/sync combinations end up in BUG() instead of
performing the necessary cache flushes.

The arch implementation should follow these rules (sketched in code after
the table):
         map             for_cpu         for_device      unmap
 TO_DEV  writeback       none            writeback       none
 TO_CPU  invalidate      invalidate*     invalidate      invalidate*
 BIDIR   writeback       invalidate      writeback       invalidate
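
To make that concrete, here is a hedged sketch of what the two hooks look
like once the BUG() arms are gone. This is not the literal
arch/csky/mm/dma-mapping.c contents: the per-page vaddr/offset plumbing is
omitted, the sketch_* names are illustrative, and the TO_DEVICE writeback
call is assumed; dma_wb_range()/dma_wbinv_range() are the csky helpers for
"write back dirty lines" and "write back, then invalidate".

/*
 * Hedged sketch only, mirroring the table above and the hunks below;
 * not the exact file contents.
 */
static void sketch_sync_for_device(unsigned long start, unsigned long end,
				   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_wb_range(start, end);	/* TO_DEV: writeback */
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_wbinv_range(start, end);	/* flush, then drop stale lines */
		break;
	default:
		BUG();
	}
}

static void sketch_sync_for_cpu(unsigned long start, unsigned long end,
				enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;				/* TO_DEV: nothing to do */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/*
		 * "invalidate*" in the table: drop lines the CPU may have
		 * speculatively fetched while the device owned the buffer.
		 */
		dma_wbinv_range(start, end);
		break;
	default:
		BUG();
	}
}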

Link: https://lore.kernel.org/lkml/[email protected]/
Signed-off-by: Guo Ren <[email protected]>
---
 arch/csky/mm/dma-mapping.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 16c2087..30a2041 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -217,7 +217,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                break;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
-               BUG();
+               dma_wbinv_range(vaddr + offset, vaddr + offset + size);
+               break;
        default:
                BUG();
        }
@@ -240,7 +241,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 
        switch (dir) {
        case DMA_TO_DEVICE:
-               BUG();
+               break;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                dma_wbinv_range(vaddr + offset, vaddr + offset + size);
-- 
2.7.4
