From: Rob Clark <robdcl...@chromium.org>

Since there is no real device associated with VGEM, it is impossible to
end up with appropriate dev->dma_ops, meaning that we have no way to
invalidate the shmem pages allocated by VGEM.  So, at least on platforms
without drm_clflush_pages(), we end up with corruption when cache lines
from previous usage of VGEM bo pages get evicted to memory.

The only sane option is to use cached mappings.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
v3: rebased on drm-tip

 drivers/gpu/drm/vgem/vgem_drv.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index e7d12e93b1f0..84262e2bd7f7 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -259,9 +259,6 @@ static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
        if (ret)
                return ret;
 
-       /* Keep the WC mmaping set by drm_gem_mmap() but our pages
-        * are ordinary and not special.
-        */
        vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 }
@@ -310,17 +307,17 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 static int vgem_prime_pin(struct drm_gem_object *obj, struct device *dev)
 {
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-       long n_pages = obj->size >> PAGE_SHIFT;
+       long i, n_pages = obj->size >> PAGE_SHIFT;
        struct page **pages;
 
        pages = vgem_pin_pages(bo);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
-       /* Flush the object from the CPU cache so that importers can rely
-        * on coherent indirect access via the exported dma-address.
-        */
-       drm_clflush_pages(pages, n_pages);
+       for (i = 0; i < n_pages; i++) {
+               dma_sync_single_for_device(dev, page_to_phys(pages[i]),
+                                          PAGE_SIZE, DMA_BIDIRECTIONAL);
+       }
 
        return 0;
 }
@@ -328,6 +325,13 @@ static int vgem_prime_pin(struct drm_gem_object *obj, struct device *dev)
 static void vgem_prime_unpin(struct drm_gem_object *obj, struct device *dev)
 {
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+       long i, n_pages = obj->size >> PAGE_SHIFT;
+       struct page **pages = bo->pages;
+
+       for (i = 0; i < n_pages; i++) {
+               dma_sync_single_for_cpu(dev, page_to_phys(pages[i]),
+                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+       }
 
        vgem_unpin_pages(bo);
 }
@@ -382,7 +386,7 @@ static void *vgem_prime_vmap(struct drm_gem_object *obj)
        if (IS_ERR(pages))
                return NULL;
 
-       return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+       return vmap(pages, n_pages, 0, PAGE_KERNEL);
 }
 
 static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -411,7 +415,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
        fput(vma->vm_file);
        vma->vm_file = get_file(obj->filp);
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
        return 0;
 }
-- 
2.21.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to