From: Thierry Reding <tred...@nvidia.com>

Add an implementation of the drm_clflush_*() functions on ARM by
borrowing code from the DMA mapping API implementation. Unfortunately,
ARM doesn't export an API to flush caches on a page-by-page basis, so
this replicates most of that code.

Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/drm_cache.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)
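
Note (not part of the patch, this text sits below the "---" marker and is
dropped by git am): a minimal usage sketch of the two entry points the
hunks below extend, assuming a hypothetical driver object "struct foo_bo"
that keeps the page array from drm_gem_get_pages() and an optional
sg_table from drm_prime_pages_to_sg(). The declarations for
drm_clflush_pages()/drm_clflush_sg() come from <drm/drmP.h> in this era.

	#include <linux/mm.h>		/* PAGE_SHIFT, struct page */
	#include <linux/scatterlist.h>	/* struct sg_table */
	#include <drm/drmP.h>		/* drm_clflush_pages(), drm_clflush_sg() */

	/* Hypothetical driver buffer object, for illustration only. */
	struct foo_bo {
		struct drm_gem_object base;
		struct page **pages;	/* from drm_gem_get_pages() */
		struct sg_table *sgt;	/* from drm_prime_pages_to_sg(), may be NULL */
	};

	/* Flush CPU writes to the backing pages before the device reads them. */
	static void foo_bo_flush(struct foo_bo *bo)
	{
		unsigned long num_pages = bo->base.size >> PAGE_SHIFT;

		if (bo->sgt)
			drm_clflush_sg(bo->sgt);
		else
			drm_clflush_pages(bo->pages, num_pages);
	}

With this patch applied, both calls walk the pages one by one through the
new drm_clflush_page() helper on ARM instead of hitting the WARN_ON_ONCE()
fallback.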

diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index a6b690626a6b..fca0b8994c77 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -72,6 +72,41 @@ drm_clflush_ipi_handler(void *null)
 }
 #endif

+#if defined(CONFIG_ARM)
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/highmem.h>
+#include <asm/outercache.h>
+
+static void drm_clflush_page(struct page *page)
+{
+       enum dma_data_direction dir = DMA_TO_DEVICE;
+       phys_addr_t phys = page_to_phys(page);
+       size_t size = PAGE_SIZE;
+       void *virt;
+
+       if (PageHighMem(page)) {
+               if (cache_is_vipt_nonaliasing()) {
+                       virt = kmap_atomic(page);
+                       dmac_map_area(virt, size, dir);
+                       kunmap_atomic(virt);
+               } else {
+                       virt = kmap_high_get(page);
+                       if (virt) {
+                               dmac_map_area(virt, size, dir);
+                               kunmap_high(page);
+                       }
+               }
+       } else {
+               virt = page_address(page);
+               dmac_map_area(virt, size, dir);
+       }
+
+       outer_flush_range(phys, phys + PAGE_SIZE);
+}
+#endif
+
 void
 drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
@@ -99,6 +134,11 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
                                   (unsigned long)page_virtual + PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
+#elif defined(CONFIG_ARM)
+       unsigned long i;
+
+       for (i = 0; i < num_pages; i++)
+               drm_clflush_page(pages[i]);
 #else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
@@ -123,6 +163,11 @@ drm_clflush_sg(struct sg_table *st)

        if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#elif defined(CONFIG_ARM)
+       struct sg_page_iter sg_iter;
+
+       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+               drm_clflush_page(sg_page_iter_page(&sg_iter));
 #else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
-- 
2.1.3
