Add support for the DMA_ATTR_SKIP_CPU_SYNC attribute so that cache line
invalidation can be skipped at map/unmap time when the driver will handle
it itself later via a dma_sync_*_for_cpu or dma_sync_*_for_device call.
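
For example (an illustrative sketch only; dev, buf, len and handle are
hypothetical driver variables, not part of this patch), a driver
receiving into a streaming DMA buffer could defer the sync like so:

	handle = dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
				      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device writes into the buffer ... */

	/* Invalidate cache lines only when the CPU is about to read. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);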

Cc: Mark Salter <msal...@redhat.com>
Cc: Aurelien Jacquiot <a-jacqu...@ti.com>
Signed-off-by: Alexander Duyck <alexander.h.du...@intel.com>
---
 arch/c6x/kernel/dma.c |   14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index db4a6a3..6752df3 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -42,14 +42,17 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
 {
        dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
-       c6x_dma_sync(handle, size, dir);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               c6x_dma_sync(handle, size, dir);
+
        return handle;
 }
 
 static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       c6x_dma_sync(handle, size, dir);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               c6x_dma_sync(handle, size, dir);
 }
 
 static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -60,7 +63,8 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
-               c6x_dma_sync(sg->dma_address, sg->length, dir);
+               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                       c6x_dma_sync(sg->dma_address, sg->length, dir);
        }
 
        return nents;
@@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct scatterlist *sg;
        int i;
 
+       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+               return;
+
        for_each_sg(sglist, sg, nents, i)
                c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
-
 }
 
 static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
