On devices where only part of LMEM is CPU-mappable, ensure we always
allocate the pages within the mappable portion. For now we assume that
all LMEM buffers will require CPU access, which is in line with pretty
much all current kernel internal users. In the next patch we will
introduce a new flag to override this behaviour.

Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
Acked-by: Nirmoy Das <nirmoy....@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 4 ++++
 drivers/gpu/drm/i915/intel_region_ttm.c | 5 +++++
 2 files changed, 9 insertions(+)
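
For reference, here is a minimal standalone sketch of the clamping rule the
hunks below apply (this is not i915 code; the struct names and fields are
illustrative stand-ins for the intel_memory_region and ttm_place fields
actually touched): when only part of the region is CPU-mappable
(io_size < total), the allocation window is limited to the first io_size
bytes by capping the last allowed page frame number.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct region {			/* stand-in for the intel_memory_region fields used */
	uint64_t total;		/* total size of the region, in bytes */
	uint64_t io_size;	/* CPU-mappable portion, in bytes (0 = fully mappable) */
};

struct placement {		/* stand-in for the struct ttm_place range fields */
	uint32_t fpfn;		/* first allowed page frame number */
	uint32_t lpfn;		/* last allowed page frame number, 0 = no limit */
};

static void clamp_to_mappable(const struct region *mr, struct placement *place)
{
	/* Only clamp when the region is partially mappable. */
	if (mr->io_size && mr->io_size < mr->total) {
		place->fpfn = 0;
		place->lpfn = mr->io_size >> PAGE_SHIFT;
	}
}

int main(void)
{
	/* e.g. a region with a mappable window much smaller than its total size */
	struct region small_bar = { .total = 16ull << 30, .io_size = 256ull << 20 };
	struct placement place = { 0, 0 };

	clamp_to_mappable(&small_bar, &place);
	/* 256M mappable / 4K pages => lpfn of 65536 */
	printf("fpfn=%u lpfn=%u\n", place.fpfn, place.lpfn);
	return 0;
}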

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 53c183f13a38..0363987dee97 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -130,6 +130,10 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
 
        if (flags & I915_BO_ALLOC_CONTIGUOUS)
                place->flags = TTM_PL_FLAG_CONTIGUOUS;
+       if (mr->io_size && mr->io_size < mr->total) {
+               place->fpfn = 0;
+               place->lpfn = mr->io_size >> PAGE_SHIFT;
+       }
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 7dea07c579aa..5a40310d6fdd 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -199,6 +199,11 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
        struct ttm_resource *res;
        int ret;
 
+       if (mem->io_size && mem->io_size < mem->total) {
+               place.fpfn = 0;
+               place.lpfn = mem->io_size >> PAGE_SHIFT;
+       }
+
        mock_bo.base.size = size;
        mock_bo.bdev = &mem->i915->bdev;
        place.flags = flags;
-- 
2.34.1
