Add a DRM_XE_SVM kernel config entry so that the xe SVM (Shared
Virtual Memory) feature can be enabled or disabled at kernel
configuration time, before compilation.
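
For example, the feature is selected with the following line in
.config (it defaults to y when its dependencies are met); the
IS_ENABLED(CONFIG_DRM_XE_SVM) call sites below compile out when it
is disabled:

  CONFIG_DRM_XE_SVM=y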

Signed-off-by: Oak Zeng <oak.z...@intel.com>
Co-developed-by: Niranjana Vishwanathapura <niranjana.vishwanathap...@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathap...@intel.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@intel.com>
Cc: Brian Welty <brian.we...@intel.com>
---
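Note (not for the commit message): a purely illustrative user-space
sketch of what the Kconfig help text below describes, i.e. that a
plain malloc'd CPU pointer is usable by the GPU under SVM. gpu_use()
is a hypothetical stand-in for a real xe submission path, not an
actual uapi call.

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /*
   * Hypothetical placeholder: in a real SVM setup this would be a GPU
   * job submission that dereferences the same virtual address.
   */
  static void gpu_use(void *ptr, size_t size)
  {
          printf("GPU would access %zu bytes at %p\n", size, ptr);
  }

  int main(void)
  {
          size_t size = 4096;
          char *buf = malloc(size);       /* ordinary CPU allocation */

          if (!buf)
                  return 1;
          memset(buf, 0xa5, size);
          /*
           * With SVM, this same pointer is valid in the GPU's view of
           * the address space; no explicit copy or mapping is needed.
           */
          gpu_use(buf, size);
          free(buf);
          return 0;
  }
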
 drivers/gpu/drm/xe/Kconfig   | 22 ++++++++++++++++++++++
 drivers/gpu/drm/xe/Makefile  |  5 +++++
 drivers/gpu/drm/xe/xe_mmio.c |  5 +++++
 drivers/gpu/drm/xe/xe_vm.c   |  2 ++
 4 files changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 5b3da06e7ba3..a57f0972e9ae 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -83,6 +83,28 @@ config DRM_XE_FORCE_PROBE
 
          Use "!*" to block the probe of the driver for all known devices.
 
+config DRM_XE_SVM
+    bool "Enable Shared Virtual Memory support in xe"
+    depends on DRM_XE
+    depends on ARCH_ENABLE_MEMORY_HOTPLUG
+    depends on ARCH_ENABLE_MEMORY_HOTREMOVE
+    depends on MEMORY_HOTPLUG
+    depends on MEMORY_HOTREMOVE
+    depends on ARCH_HAS_PTE_DEVMAP
+    depends on SPARSEMEM_VMEMMAP
+    depends on ZONE_DEVICE
+    depends on DEVICE_PRIVATE
+    depends on MMU
+    select HMM_MIRROR
+    select MMU_NOTIFIER
+    default y
+    help
+      Choose this option if you want Shared Virtual Memory (SVM)
+      support in xe. With SVM, the virtual address space is shared
+      between the CPU and the GPU, so any virtual address, such as
+      one returned by malloc or mmap, a variable on the stack, or a
+      global memory pointer, can be used by the GPU transparently.
+
 menu "drm/Xe Debugging"
 depends on DRM_XE
 depends on EXPERT
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index df8601d6a59f..b75bdbc5e42c 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -282,6 +282,11 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
        i915-display/skl_universal_plane.o \
        i915-display/skl_watermark.o
 
+xe-$(CONFIG_DRM_XE_SVM) += xe_svm.o \
+                           xe_svm_devmem.o \
+                           xe_svm_range.o \
+                           xe_svm_migrate.o
+
 ifeq ($(CONFIG_ACPI),y)
        xe-$(CONFIG_DRM_XE_DISPLAY) += \
                i915-display/intel_acpi.o \
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index cfe25a3c7059..7c95f675ed92 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -286,7 +286,9 @@ int xe_mmio_probe_vram(struct xe_device *xe)
                }
 
                io_size -= min_t(u64, tile_size, io_size);
+#if IS_ENABLED(CONFIG_DRM_XE_SVM)
                xe_svm_devm_add(tile, &tile->mem.vram);
+#endif
        }
 
        xe->mem.vram.actual_physical_size = total_size;
@@ -361,8 +363,11 @@ static void mmio_fini(struct drm_device *drm, void *arg)
        pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
        if (xe->mem.vram.mapping)
                iounmap(xe->mem.vram.mapping);
+
+#if IS_ENABLED(CONFIG_DRM_XE_SVM)
        for_each_tile(tile, xe, id) {
                xe_svm_devm_remove(xe, &tile->mem.vram);
        }
+#endif
 }
 
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 3c301a5c7325..12d82f2fc195 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1376,7 +1376,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
                xe->usm.num_vm_in_non_fault_mode++;
        mutex_unlock(&xe->usm.lock);
 
+#if IS_ENABLED(CONFIG_DRM_XE_SVM)
        vm->svm = xe_create_svm(vm);
+#endif
        trace_xe_vm_create(vm);
 
        return vm;
-- 
2.26.3
