This change adds support for DMA_ATTR_SKIP_CPU_SYNC, which lets callers skip
the cache line invalidation performed at map time when the driver will
handle it later via a sync_for_cpu or sync_for_device call.
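
For reference, a caller that opts out of the implicit sync might look
roughly like the sketch below. This is illustrative only and not part of
the patch; the function name and the "offset"/"frag_size" parameters are
made up, and it relies on the generic dma_map_page_attrs() and
dma_sync_single_for_device() helpers from <linux/dma-mapping.h>:

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only -- not part of this patch.  The function
 * name and the "offset"/"frag_size" parameters are hypothetical.
 */
static int example_map_rx_frag(struct device *dev, struct page *page,
                               unsigned int offset, unsigned int frag_size,
                               dma_addr_t *dma_out)
{
        dma_addr_t dma;

        /* Map the whole page but skip the implicit sync_for_device. */
        dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
                                 DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* Sync only the fragment actually handed to the hardware. */
        dma_sync_single_for_device(dev, dma + offset, frag_size,
                                   DMA_FROM_DEVICE);

        *dma_out = dma;
        return 0;
}

The matching teardown would sync the fragment back for the CPU and then
unmap with the same attribute (dma_unmap_page_attrs() with
DMA_ATTR_SKIP_CPU_SYNC) so no redundant invalidation is done there either.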

Cc: Geert Uytterhoeven <ge...@linux-m68k.org>
Cc: linux-m...@lists.linux-m68k.org
Signed-off-by: Alexander Duyck <alexander.h.du...@intel.com>
---
 arch/m68k/kernel/dma.c |    8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 8cf97cb..0707006 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -134,7 +134,9 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
 {
        dma_addr_t handle = page_to_phys(page) + offset;
 
-       dma_sync_single_for_device(dev, handle, size, dir);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_sync_single_for_device(dev, handle, size, dir);
+
        return handle;
 }
 
@@ -146,6 +148,10 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
+
+               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+                       continue;
+
                dma_sync_single_for_device(dev, sg->dma_address, sg->length,
                                           dir);
        }
