Create a vmap for discontiguous lmem objects to support
i915_gem_object_pin_map().

Signed-off-by: Chris Wilson <[email protected]>
Cc: Matthew Auld <[email protected]>
---
 drivers/gpu/drm/i915/gem/i915_gem_pages.c | 45 ++++++++++++++++++++---
 1 file changed, 39 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 75197ca696a8..ef5281a0bd05 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -158,10 +158,10 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 
 static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
 {
-       if (i915_gem_object_is_lmem(obj))
-               io_mapping_unmap((void __force __iomem *)ptr);
-       else if (is_vmalloc_addr(ptr))
+       if (is_vmalloc_addr(ptr))
                vunmap(ptr);
+       else if (i915_gem_object_is_lmem(obj))
+               io_mapping_unmap((void __force __iomem *)ptr);
        else
                kunmap(kmap_to_page(ptr));
 }
@@ -236,6 +236,12 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        return err;
 }
 
+static inline pte_t io_wc_pte(dma_addr_t addr)
+{
+       return pte_mkspecial(pfn_pte(addr >> PAGE_SHIFT,
+                                    pgprot_writecombine(PAGE_KERNEL)));
+}
+
 /* The 'mapping' part of i915_gem_object_pin_map() below */
 static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
                                 enum i915_map_type type)
@@ -251,13 +257,40 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
        void *addr;
 
        if (i915_gem_object_is_lmem(obj)) {
-               void __iomem *io;
+               struct vm_struct *area;
+               dma_addr_t addr;
+               pte_t **ptes;
+               void *mem;
 
                if (type != I915_MAP_WC)
                        return NULL;
 
-               io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
-               return (void __force *)io;
+               if (i915_gem_object_is_contiguous(obj)) {
+                       void __iomem *io =
+                               i915_gem_object_lmem_io_map(obj,
+                                                           0, obj->base.size);
+
+                       return (void __force *)io;
+               }
+
+               mem = kvmalloc_array(obj->base.size >> PAGE_SHIFT,
+                                    sizeof(*ptes),
+                                    GFP_KERNEL);
+               if (!mem)
+                       return NULL;
+
+               ptes = mem;
+               area = alloc_vm_area(obj->base.size, ptes);
+               if (!area) {
+                       kvfree(mem);
+                       return NULL;
+               }
+
+               for_each_sgt_daddr(addr, sgt_iter, sgt)
+                       **ptes++ = io_wc_pte(addr);
+               kvfree(mem);
+
+               return area->addr;
        }
 
        /* A single page can always be kmapped */
-- 
2.25.0.rc0

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to