Distinguishing the system VM from the general VM is a good idea, but I'm not sure 
about renaming the GTT part to sysvm, especially since TTM TT stays there. Maybe 
we just need to rename the GART functions to SYSVM.

Regards,
David Zhou

-----Original Message-----
From: amd-gfx [mailto:[email protected]] On Behalf Of 
Christian König
Sent: Monday, July 03, 2017 5:45 PM
To: [email protected]
Subject: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM

From: Christian König <[email protected]>

Just mass rename all names related to the hardware GART/GTT functions to SYSVM.

The name of symbols related to the TTM TT domain stay the same.

This should improve the distinction between the two.

Signed-off-by: Christian König <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/Kconfig         |   9 +-
 drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  58 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  48 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 423 -----------------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 423 +++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c   |  84 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  76 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  30 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |   4 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |  16 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |   4 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  66 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  70 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  70 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  66 ++---
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c    |  16 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h    |   4 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |   4 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |   8 +-
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      |   4 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c      |   8 +-
 24 files changed, 749 insertions(+), 748 deletions(-)
 delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig 
b/drivers/gpu/drm/amd/amdgpu/Kconfig
index e8af1f5..ebbac01 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -31,14 +31,15 @@ config DRM_AMDGPU_USERPTR
          This option selects CONFIG_MMU_NOTIFIER if it isn't already
          selected to enabled full userptr support.
 
-config DRM_AMDGPU_GART_DEBUGFS
-       bool "Allow GART access through debugfs"
+config DRM_AMDGPU_SYSVM_DEBUGFS
+       bool "Allow SYSVM access through debugfs"
        depends on DRM_AMDGPU
        depends on DEBUG_FS
        default n
        help
-         Selecting this option creates a debugfs file to inspect the mapped
-         pages. Uses more memory for housekeeping, enable only for debugging.
+         Selecting this option creates a debugfs file to inspect the SYSVM
+         mapped pages. Uses more memory for housekeeping, enable only for
+         debugging.
 
 source "drivers/gpu/drm/amd/acp/Kconfig"
 source "drivers/gpu/drm/amd/display/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
b/drivers/gpu/drm/amd/amdgpu/Makefile
index 3661110..d80d49f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -22,7 +22,7 @@ amdgpu-y := amdgpu_drv.o
 # add KMS driver
 amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
-       atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
+       atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_sysvm.o \
        amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
        amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
        amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4a2b33d..abe191f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -305,7 +305,7 @@ struct amdgpu_vm_pte_funcs {
 };
 
 /* provided by the gmc block */
-struct amdgpu_gart_funcs {
+struct amdgpu_sysvm_funcs {
        /* flush the vm tlb via mmio */
        void (*flush_gpu_tlb)(struct amdgpu_device *adev,
                              uint32_t vmid);
@@ -543,39 +543,39 @@ struct amdgpu_mc;
 #define AMDGPU_GPU_PAGE_SHIFT 12
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & 
~AMDGPU_GPU_PAGE_MASK)
 
-struct amdgpu_gart {
+struct amdgpu_sysvm {
        dma_addr_t                      table_addr;
        struct amdgpu_bo                *robj;
        void                            *ptr;
        unsigned                        num_gpu_pages;
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
        struct page                     **pages;
 #endif
        bool                            ready;
 
        /* Asic default pte flags */
-       uint64_t                        gart_pte_flags;
+       uint64_t                        sysvm_pte_flags;
 
-       const struct amdgpu_gart_funcs *gart_funcs;
+       const struct amdgpu_sysvm_funcs *sysvm_funcs;
 };
 
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_sysvm_init(struct amdgpu_device *adev);
+void amdgpu_sysvm_fini(struct amdgpu_device *adev);
+int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages);
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
                    int pages, dma_addr_t *dma_addr, uint64_t flags,
                    void *dst);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint64_t flags);
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
@@ -604,15 +604,15 @@ struct amdgpu_mc {
         * about vram size near mc fb location */
        u64                     mc_vram_size;
        u64                     visible_vram_size;
-       u64                     gtt_size;
-       u64                     gtt_start;
-       u64                     gtt_end;
+       u64                     sysvm_size;
+       u64                     sysvm_start;
+       u64                     sysvm_end;
        u64                     vram_start;
        u64                     vram_end;
        unsigned                vram_width;
        u64                     real_vram_size;
        int                     vram_mtrr;
-       u64                     gtt_base_align;
+       u64                     sysvm_base_align;
        u64                     mc_mask;
        const struct firmware   *fw;    /* MC firmware */
        uint32_t                fw_version;
@@ -1575,7 +1575,7 @@ struct amdgpu_device {
 
        /* MC */
        struct amdgpu_mc                mc;
-       struct amdgpu_gart              gart;
+       struct amdgpu_sysvm             sysvm;
        struct amdgpu_dummy_page        dummy_page;
        struct amdgpu_vm_manager        vm_manager;
        struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
@@ -1686,8 +1686,8 @@ struct amdgpu_device {
        struct list_head                shadow_list;
        struct mutex                    shadow_list_lock;
        /* link all gtt */
-       spinlock_t                      gtt_list_lock;
-       struct list_head                gtt_list;
+       spinlock_t                      sysvm_list_lock;
+       struct list_head                sysvm_list;
        /* keep an lru list of rings by HW IP */
        struct list_head                ring_lru_list;
        spinlock_t                      ring_lru_list_lock;
@@ -1835,13 +1835,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) 
(adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, 
v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) 
(adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) 
(adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) 
(adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, addr) 
(adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
+#define amdgpu_sysvm_flush_gpu_tlb(adev, vmid) 
(adev)->sysvm.sysvm_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_sysvm_set_pte_pde(adev, pt, idx, addr, flags) 
(adev)->sysvm.sysvm_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_sysvm_get_vm_pde(adev, addr) 
(adev)->sysvm.sysvm_funcs->get_vm_pde((adev), (addr))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) 
((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) 
((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), 
(incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) 
((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), 
(incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) 
(adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_vm_get_pte_flags(adev, flags) 
(adev)->sysvm.sysvm_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
@@ -1908,7 +1908,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt 
*ttm,
                                 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, 
u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5b1220f..46a82d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -667,39 +667,39 @@ void amdgpu_vram_location(struct amdgpu_device *adev, 
struct amdgpu_mc *mc, u64
 }
 
 /**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_sysvm_location - try to find SYSVM location
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
  *
- * Function will place try to place GTT before or after VRAM.
+ * Function will place try to place SYSVM before or after VRAM.
  *
- * If GTT size is bigger than space left then we ajust GTT size.
- * If SYSVM size is bigger than space left then we adjust SYSVM size.
  * Thus function will never fails.
  *
- * FIXME: when reducing GTT size align new size on power of 2.
+ * FIXME: when reducing SYSVM size align new size on power of 2.
  */
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 {
        u64 size_af, size_bf;
 
-       size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & 
~mc->gtt_base_align;
-       size_bf = mc->vram_start & ~mc->gtt_base_align;
+       size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & 
~mc->sysvm_base_align;
+       size_bf = mc->vram_start & ~mc->sysvm_base_align;
        if (size_bf > size_af) {
-               if (mc->gtt_size > size_bf) {
-                       dev_warn(adev->dev, "limiting GTT\n");
-                       mc->gtt_size = size_bf;
+               if (mc->sysvm_size > size_bf) {
+                       dev_warn(adev->dev, "limiting SYSVM\n");
+                       mc->sysvm_size = size_bf;
                }
-               mc->gtt_start = 0;
+               mc->sysvm_start = 0;
        } else {
-               if (mc->gtt_size > size_af) {
-                       dev_warn(adev->dev, "limiting GTT\n");
-                       mc->gtt_size = size_af;
+               if (mc->sysvm_size > size_af) {
+                       dev_warn(adev->dev, "limiting SYSVM\n");
+                       mc->sysvm_size = size_af;
                }
-               mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & 
~mc->gtt_base_align;
+               mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & 
~mc->sysvm_base_align;
        }
-       mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
-       dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
-                       mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+       mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
+       dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
+                       mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
 
 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
 {
-       memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
+       memcpy(adev->reset_magic, adev->sysvm.ptr, AMDGPU_RESET_MAGIC_NUM);
 }
 
 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
 {
-       return !!memcmp(adev->gart.ptr, adev->reset_magic,
+       return !!memcmp(adev->sysvm.ptr, adev->reset_magic,
                        AMDGPU_RESET_MAGIC_NUM);
 }
 
@@ -2052,14 +2052,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->flags = flags;
        adev->asic_type = flags & AMD_ASIC_MASK;
        adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-       adev->mc.gtt_size = 512 * 1024 * 1024;
+       adev->mc.sysvm_size = 512 * 1024 * 1024;
        adev->accel_working = false;
        adev->num_rings = 0;
        adev->mman.buffer_funcs = NULL;
        adev->mman.buffer_funcs_ring = NULL;
        adev->vm_manager.vm_pte_funcs = NULL;
        adev->vm_manager.vm_pte_num_rings = 0;
-       adev->gart.gart_funcs = NULL;
+       adev->sysvm.sysvm_funcs = NULL;
        adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 
        adev->smc_rreg = &amdgpu_invalid_rreg;
@@ -2110,8 +2110,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&adev->shadow_list);
        mutex_init(&adev->shadow_list_lock);
 
-       INIT_LIST_HEAD(&adev->gtt_list);
-       spin_lock_init(&adev->gtt_list_lock);
+       INIT_LIST_HEAD(&adev->sysvm_list);
+       spin_lock_init(&adev->sysvm_list_lock);
 
        INIT_LIST_HEAD(&adev->ring_lru_list);
        spin_lock_init(&adev->ring_lru_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
deleted file mode 100644
index c808388..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- *          Alex Deucher
- *          Jerome Glisse
- */
-#include <drm/drmP.h>
-#include <drm/amdgpu_drm.h>
-#include "amdgpu.h"
-
-/*
- * GART
- * The GART (Graphics Aperture Remapping Table) is an aperture
- * in the GPU's address space.  System pages can be mapped into
- * the aperture and look like contiguous pages from the GPU's
- * perspective.  A page table maps the pages in the aperture
- * to the actual backing pages in system memory.
- *
- * Radeon GPUs support both an internal GART, as described above,
- * and AGP.  AGP works similarly, but the GART table is configured
- * and maintained by the northbridge rather than the driver.
- * Radeon hw has a separate AGP aperture that is programmed to
- * point to the AGP aperture provided by the northbridge and the
- * requests are passed through to the northbridge aperture.
- * Both AGP and internal GART can be used at the same time, however
- * that is not currently supported by the driver.
- *
- * This file handles the common internal GART management.
- */
-
-/*
- * Common GART table functions.
- */
-
-/**
- * amdgpu_gart_set_defaults - set the default gtt_size
- *
- * @adev: amdgpu_device pointer
- *
- * Set the default gtt_size based on parameters and available VRAM.
- */
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
-{
-       /* unless the user had overridden it, set the gart
-        * size equal to the 1024 or vram, whichever is larger.
-        */
-       if (amdgpu_gart_size == -1)
-               adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-                                       adev->mc.mc_vram_size);
-       else
-               adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-}
-
-/**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
- */
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
-{
-       void *ptr;
-
-       ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
-                                  &adev->gart.table_addr);
-       if (ptr == NULL) {
-               return -ENOMEM;
-       }
-#ifdef CONFIG_X86
-       if (0) {
-               set_memory_uc((unsigned long)ptr,
-                             adev->gart.table_size >> PAGE_SHIFT);
-       }
-#endif
-       adev->gart.ptr = ptr;
-       memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
-       return 0;
-}
-
-/**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
- * gart table to be in system memory.
- */
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
-{
-       if (adev->gart.ptr == NULL) {
-               return;
-       }
-#ifdef CONFIG_X86
-       if (0) {
-               set_memory_wb((unsigned long)adev->gart.ptr,
-                             adev->gart.table_size >> PAGE_SHIFT);
-       }
-#endif
-       pci_free_consistent(adev->pdev, adev->gart.table_size,
-                           (void *)adev->gart.ptr,
-                           adev->gart.table_addr);
-       adev->gart.ptr = NULL;
-       adev->gart.table_addr = 0;
-}
-
-/**
- * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate video memory for GART page table
- * (pcie r4xx, r5xx+).  These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
-{
-       int r;
-
-       if (adev->gart.robj == NULL) {
-               r = amdgpu_bo_create(adev, adev->gart.table_size,
-                                    PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-                                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-                                    NULL, NULL, &adev->gart.robj);
-               if (r) {
-                       return r;
-               }
-       }
-       return 0;
-}
-
-/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+).  These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
-{
-       uint64_t gpu_addr;
-       int r;
-
-       r = amdgpu_bo_reserve(adev->gart.robj, false);
-       if (unlikely(r != 0))
-               return r;
-       r = amdgpu_bo_pin(adev->gart.robj,
-                               AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
-       if (r) {
-               amdgpu_bo_unreserve(adev->gart.robj);
-               return r;
-       }
-       r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
-       if (r)
-               amdgpu_bo_unpin(adev->gart.robj);
-       amdgpu_bo_unreserve(adev->gart.robj);
-       adev->gart.table_addr = gpu_addr;
-       return r;
-}
-
-/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
- */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
-{
-       int r;
-
-       if (adev->gart.robj == NULL) {
-               return;
-       }
-       r = amdgpu_bo_reserve(adev->gart.robj, true);
-       if (likely(r == 0)) {
-               amdgpu_bo_kunmap(adev->gart.robj);
-               amdgpu_bo_unpin(adev->gart.robj);
-               amdgpu_bo_unreserve(adev->gart.robj);
-               adev->gart.ptr = NULL;
-       }
-}
-
-/**
- * amdgpu_gart_table_vram_free - free gart page table vram
- *
- * @adev: amdgpu_device pointer
- *
- * Free the video memory used for the GART page table
- * (pcie r4xx, r5xx+).  These asics require the gart table to
- * be in video memory.
- */
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
-{
-       if (adev->gart.robj == NULL) {
-               return;
-       }
-       amdgpu_bo_unref(&adev->gart.robj);
-}
-
-/*
- * Common gart functions.
- */
-/**
- * amdgpu_gart_unbind - unbind pages from the gart page table
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to unbind
- *
- * Unbinds the requested pages from the gart page table and
- * replaces them with the dummy page (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
-                       int pages)
-{
-       unsigned t;
-       unsigned p;
-       int i, j;
-       u64 page_base;
-       /* Starting from VEGA10, system bit must be 0 to mean invalid. */
-       uint64_t flags = 0;
-
-       if (!adev->gart.ready) {
-               WARN(1, "trying to unbind memory from uninitialized GART !\n");
-               return -EINVAL;
-       }
-
-       t = offset / AMDGPU_GPU_PAGE_SIZE;
-       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-       for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-               adev->gart.pages[p] = NULL;
-#endif
-               page_base = adev->dummy_page.addr;
-               if (!adev->gart.ptr)
-                       continue;
-
-               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-                       amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-                                               t, page_base, flags);
-                       page_base += AMDGPU_GPU_PAGE_SIZE;
-               }
-       }
-       mb();
-       amdgpu_gart_flush_gpu_tlb(adev, 0);
-       return 0;
-}
-
-/**
- * amdgpu_gart_map - map dma_addresses into GART entries
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to bind
- * @dma_addr: DMA addresses of pages
- *
- * Map the dma_addresses into GART entries (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
-                   int pages, dma_addr_t *dma_addr, uint64_t flags,
-                   void *dst)
-{
-       uint64_t page_base;
-       unsigned i, j, t;
-
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
-       t = offset / AMDGPU_GPU_PAGE_SIZE;
-
-       for (i = 0; i < pages; i++) {
-               page_base = dma_addr[i];
-               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-                       amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
-                       page_base += AMDGPU_GPU_PAGE_SIZE;
-               }
-       }
-       return 0;
-}
-
-/**
- * amdgpu_gart_bind - bind pages into the gart page table
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to bind
- * @pagelist: pages to bind
- * @dma_addr: DMA addresses of pages
- *
- * Binds the requested pages to the gart page table
- * (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-                    int pages, struct page **pagelist, dma_addr_t *dma_addr,
-                    uint64_t flags)
-{
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       unsigned i,t,p;
-#endif
-       int r;
-
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       t = offset / AMDGPU_GPU_PAGE_SIZE;
-       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-       for (i = 0; i < pages; i++, p++)
-               adev->gart.pages[p] = pagelist[i];
-#endif
-
-       if (adev->gart.ptr) {
-               r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-                           adev->gart.ptr);
-               if (r)
-                       return r;
-       }
-
-       mb();
-       amdgpu_gart_flush_gpu_tlb(adev, 0);
-       return 0;
-}
-
-/**
- * amdgpu_gart_init - init the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page and init the gart driver info (all asics).
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_init(struct amdgpu_device *adev)
-{
-       int r;
-
-       if (adev->dummy_page.page)
-               return 0;
-
-       /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
-       if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
-               DRM_ERROR("Page size is smaller than GPU page size!\n");
-               return -EINVAL;
-       }
-       r = amdgpu_dummy_page_init(adev);
-       if (r)
-               return r;
-       /* Compute table size */
-       adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
-       adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
-       DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
-                adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       /* Allocate pages table */
-       adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
-       if (adev->gart.pages == NULL) {
-               amdgpu_gart_fini(adev);
-               return -ENOMEM;
-       }
-#endif
-
-       return 0;
-}
-
-/**
- * amdgpu_gart_fini - tear down the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the gart driver info and free the dummy page (all asics).
- */
-void amdgpu_gart_fini(struct amdgpu_device *adev)
-{
-       if (adev->gart.ready) {
-               /* unbind pages */
-               amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
-       }
-       adev->gart.ready = false;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       vfree(adev->gart.pages);
-       adev->gart.pages = NULL;
-#endif
-       amdgpu_dummy_page_fini(adev);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627..73a1c64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -82,7 +82,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, 
unsigned size,
        if (r)
                kfree(*job);
        else
-               (*job)->vm_pd_addr = adev->gart.table_addr;
+               (*job)->vm_pd_addr = adev->sysvm.table_addr;
 
        return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
new file mode 100644
index 0000000..50fc8d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+/*
+ * SYSVM
+ * The system VM (previously called GART) is an aperture
+ * in the GPU's address space.  System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective.  A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * AMD GPUs support both an internal SYSVM-based GART, as described above,
+ * and AGP.  AGP works similarly, but the translation table is configured
+ * and maintained by the northbridge rather than the driver.
+ * The hardware has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal SYSVM can be used at the same time, however
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal SYSVM management.
+ */
+
+/*
+ * Common SYSVM table functions.
+ */
+
+/**
+ * amdgpu_sysvm_set_defaults - set the default sysvm_size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the default sysvm_size based on the amdgpu_gart_size module
+ * parameter and the available VRAM.
+ */
+void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
+{
+       /* Unless the user has overridden it (amdgpu_gart_size module
+        * parameter, in MB), make the sysvm aperture the default GTT
+        * size or the VRAM size, whichever is larger.
+        * NOTE(review): if AMDGPU_DEFAULT_GTT_SIZE_MB is a signed int,
+        * "<< 20" can overflow before max() sees it — confirm the
+        * macro is unsigned/wide enough.
+        */
+       if (amdgpu_gart_size == -1)
+               adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+                                       adev->mc.mc_vram_size);
+       else
+               adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
+}
+
+/**
+ * amdgpu_sysvm_table_ram_alloc - allocate system ram for sysvm page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate coherent system memory for the SYSVM page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * page table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev)
+{
+       void *ptr;
+
+       /* Coherent DMA allocation; also returns the bus address the
+        * GPU uses to reach the table (table_addr). */
+       ptr = pci_alloc_consistent(adev->pdev, adev->sysvm.table_size,
+                                  &adev->sysvm.table_addr);
+       if (ptr == NULL) {
+               return -ENOMEM;
+       }
+#ifdef CONFIG_X86
+       /* NOTE(review): dead code — the uncached mapping is disabled
+        * with if (0); either remove it or document why it is kept. */
+       if (0) {
+               set_memory_uc((unsigned long)ptr,
+                             adev->sysvm.table_size >> PAGE_SHIFT);
+       }
+#endif
+       adev->sysvm.ptr = ptr;
+       /* Start with an empty (all invalid) table. */
+       memset((void *)adev->sysvm.ptr, 0, adev->sysvm.table_size);
+       return 0;
+}
+
+/**
+ * amdgpu_sysvm_table_ram_free - free system ram for sysvm page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the system memory used for the SYSVM page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * page table to be in system memory.  No-op if nothing was allocated.
+ */
+void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev)
+{
+       if (adev->sysvm.ptr == NULL) {
+               return;
+       }
+#ifdef CONFIG_X86
+       /* NOTE(review): dead code, mirrors the disabled set_memory_uc()
+        * in the alloc path. */
+       if (0) {
+               set_memory_wb((unsigned long)adev->sysvm.ptr,
+                             adev->sysvm.table_size >> PAGE_SHIFT);
+       }
+#endif
+       pci_free_consistent(adev->pdev, adev->sysvm.table_size,
+                           (void *)adev->sysvm.ptr,
+                           adev->sysvm.table_addr);
+       adev->sysvm.ptr = NULL;
+       adev->sysvm.table_addr = 0;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_alloc - allocate vram for sysvm page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate video memory for the SYSVM page table
+ * (pcie r4xx, r5xx+).  These asics require the
+ * page table to be in video memory.
+ * Idempotent: an already allocated table BO is kept.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->sysvm.robj == NULL) {
+               /* Contiguous + CPU accessible so the GPU can walk the
+                * table and the CPU can update PTEs via a mapping. */
+               r = amdgpu_bo_create(adev, adev->sysvm.table_size,
+                                    PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+                                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+                                    NULL, NULL, &adev->sysvm.robj);
+               if (r) {
+                       return r;
+               }
+       }
+       return 0;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_pin - pin sysvm page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Pin the SYSVM page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+).  These asics require the
+ * page table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev)
+{
+       uint64_t gpu_addr;
+       int r;
+
+       r = amdgpu_bo_reserve(adev->sysvm.robj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = amdgpu_bo_pin(adev->sysvm.robj,
+                               AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+       if (r) {
+               amdgpu_bo_unreserve(adev->sysvm.robj);
+               return r;
+       }
+       /* Map the table so the CPU can write PTEs directly. */
+       r = amdgpu_bo_kmap(adev->sysvm.robj, &adev->sysvm.ptr);
+       if (r)
+               amdgpu_bo_unpin(adev->sysvm.robj);
+       amdgpu_bo_unreserve(adev->sysvm.robj);
+       /* NOTE(review): table_addr is assigned even when kmap failed and
+        * the BO was unpinned — callers bail out on r, but confirm
+        * nothing consumes the stale address on the error path. */
+       adev->sysvm.table_addr = gpu_addr;
+       return r;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_unpin - unpin sysvm page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unpin the SYSVM page table in vram (pcie r4xx, r5xx+).
+ * These asics require the page table to be in video memory.
+ * No-op if the table BO was never allocated; if the reserve fails
+ * the table stays mapped and pinned.
+ */
+void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->sysvm.robj == NULL) {
+               return;
+       }
+       r = amdgpu_bo_reserve(adev->sysvm.robj, true);
+       if (likely(r == 0)) {
+               amdgpu_bo_kunmap(adev->sysvm.robj);
+               amdgpu_bo_unpin(adev->sysvm.robj);
+               amdgpu_bo_unreserve(adev->sysvm.robj);
+               adev->sysvm.ptr = NULL;
+       }
+}
+
+/**
+ * amdgpu_sysvm_table_vram_free - free sysvm page table vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the video memory used for the SYSVM page table
+ * (pcie r4xx, r5xx+).  These asics require the page table to
+ * be in video memory.  No-op if the table BO was never allocated.
+ */
+void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev)
+{
+       if (adev->sysvm.robj == NULL) {
+               return;
+       }
+       amdgpu_bo_unref(&adev->sysvm.robj);
+}
+
+/*
+ * Common sysvm functions.
+ */
+/**
+ * amdgpu_sysvm_unbind - unbind pages from the sysvm page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's sysvm aperture
+ * @pages: number of (CPU-sized) pages to unbind
+ *
+ * Unbinds the requested pages from the sysvm page table and
+ * replaces them with the dummy page (all asics), then flushes
+ * the GPU TLB.
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
+                       int pages)
+{
+       unsigned t;
+       unsigned p;
+       int i, j;
+       u64 page_base;
+       /* Starting from VEGA10, system bit must be 0 to mean invalid. */
+       uint64_t flags = 0;
+
+       if (!adev->sysvm.ready) {
+               WARN(1, "trying to unbind memory from uninitialized GART !\n");
+               return -EINVAL;
+       }
+
+       /* t indexes GPU-sized pages, p indexes CPU-sized pages; one CPU
+        * page covers PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE table entries. */
+       t = offset / AMDGPU_GPU_PAGE_SIZE;
+       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+       for (i = 0; i < pages; i++, p++) {
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+               adev->sysvm.pages[p] = NULL;
+#endif
+               /* Point the freed entries at the dummy page so stray GPU
+                * accesses hit a harmless, known location. */
+               page_base = adev->dummy_page.addr;
+               if (!adev->sysvm.ptr)
+                       continue;
+
+               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+                       amdgpu_sysvm_set_pte_pde(adev, adev->sysvm.ptr,
+                                               t, page_base, flags);
+                       page_base += AMDGPU_GPU_PAGE_SIZE;
+               }
+       }
+       /* Make the PTE writes visible before the TLB flush. */
+       mb();
+       amdgpu_sysvm_flush_gpu_tlb(adev, 0);
+       return 0;
+}
+
+/**
+ * amdgpu_sysvm_map - map dma_addresses into SYSVM entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's sysvm aperture
+ * @pages: number of (CPU-sized) pages to map
+ * @dma_addr: DMA addresses of pages
+ * @flags: PTE flags to set for the entries
+ * @dst: page table to write the entries into
+ *
+ * Map the dma_addresses into SYSVM entries (all asics).  Only writes
+ * the entries into @dst; callers are responsible for the memory
+ * barrier and TLB flush (see amdgpu_sysvm_bind()).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
+                   int pages, dma_addr_t *dma_addr, uint64_t flags,
+                   void *dst)
+{
+       uint64_t page_base;
+       unsigned i, j, t;
+
+       if (!adev->sysvm.ready) {
+               WARN(1, "trying to bind memory to uninitialized GART !\n");
+               return -EINVAL;
+       }
+
+       t = offset / AMDGPU_GPU_PAGE_SIZE;
+
+       /* Each CPU-sized page expands to multiple GPU-sized PTEs. */
+       for (i = 0; i < pages; i++) {
+               page_base = dma_addr[i];
+               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+                       amdgpu_sysvm_set_pte_pde(adev, dst, t, page_base, flags);
+                       page_base += AMDGPU_GPU_PAGE_SIZE;
+               }
+       }
+       return 0;
+}
+
+/**
+ * amdgpu_sysvm_bind - bind pages into the sysvm page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's sysvm aperture
+ * @pages: number of (CPU-sized) pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ * @flags: PTE flags to set for the mapped entries
+ *
+ * Binds the requested pages to the sysvm page table
+ * (all asics) and flushes the GPU TLB.
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
+                    int pages, struct page **pagelist, dma_addr_t *dma_addr,
+                    uint64_t flags)
+{
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+       unsigned i,t,p;
+#endif
+       int r;
+
+       if (!adev->sysvm.ready) {
+               WARN(1, "trying to bind memory to uninitialized GART !\n");
+               return -EINVAL;
+       }
+
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+       /* Remember the struct pages only for the debugfs dump. */
+       t = offset / AMDGPU_GPU_PAGE_SIZE;
+       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+       for (i = 0; i < pages; i++, p++)
+               adev->sysvm.pages[p] = pagelist[i];
+#endif
+
+       /* Only write PTEs when the table is CPU mapped. */
+       if (adev->sysvm.ptr) {
+               r = amdgpu_sysvm_map(adev, offset, pages, dma_addr, flags,
+                           adev->sysvm.ptr);
+               if (r)
+                       return r;
+       }
+
+       /* Make the PTE writes visible before the TLB flush. */
+       mb();
+       amdgpu_sysvm_flush_gpu_tlb(adev, 0);
+       return 0;
+}
+
+/**
+ * amdgpu_sysvm_init - init the driver info for managing the sysvm
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the dummy page and init the sysvm driver info (all asics).
+ * Idempotent: returns early if the dummy page already exists.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->dummy_page.page)
+               return 0;
+
+       /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
+       if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
+               DRM_ERROR("Page size is smaller than GPU page size!\n");
+               return -EINVAL;
+       }
+       r = amdgpu_dummy_page_init(adev);
+       if (r)
+               return r;
+       /* Compute table size */
+       adev->sysvm.num_cpu_pages = adev->mc.sysvm_size / PAGE_SIZE;
+       adev->sysvm.num_gpu_pages = adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE;
+       /* NOTE(review): the log message still says "GART" after the
+        * rename to SYSVM — consider updating user-visible strings. */
+       DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+                adev->sysvm.num_cpu_pages, adev->sysvm.num_gpu_pages);
+
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+       /* Allocate pages table */
+       adev->sysvm.pages = vzalloc(sizeof(void *) * adev->sysvm.num_cpu_pages);
+       if (adev->sysvm.pages == NULL) {
+               amdgpu_sysvm_fini(adev);
+               return -ENOMEM;
+       }
+#endif
+
+       return 0;
+}
+
+/**
+ * amdgpu_sysvm_fini - tear down the driver info for managing the sysvm
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the sysvm driver info and free the dummy page (all asics).
+ * Safe to call on a partially initialized device (used on the
+ * amdgpu_sysvm_init() error path).
+ */
+void amdgpu_sysvm_fini(struct amdgpu_device *adev)
+{
+       if (adev->sysvm.ready) {
+               /* Unbind everything so no stale mappings survive. */
+               amdgpu_sysvm_unbind(adev, 0, adev->sysvm.num_cpu_pages);
+       }
+       adev->sysvm.ready = false;
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+       vfree(adev->sysvm.pages);
+       adev->sysvm.pages = NULL;
+#endif
+       amdgpu_dummy_page_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index d02e611..651712e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -32,8 +32,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct amdgpu_bo *vram_obj = NULL;
-       struct amdgpu_bo **gtt_obj = NULL;
-       uint64_t gtt_addr, vram_addr;
+       struct amdgpu_bo **sysvm_obj = NULL;
+       uint64_t sysvm_addr, vram_addr;
        unsigned n, size;
        int i, r;
 
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
-       n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+       n = adev->mc.sysvm_size - AMDGPU_IB_POOL_SIZE*64*1024;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                if (adev->rings[i])
                        n -= adev->rings[i]->ring_size;
@@ -52,8 +52,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                n -= adev->irq.ih.ring_size;
        n /= size;
 
-       gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
-       if (!gtt_obj) {
+       sysvm_obj = kzalloc(n * sizeof(*sysvm_obj), GFP_KERNEL);
+       if (!sysvm_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
                goto out_cleanup;
@@ -75,42 +75,42 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                goto out_unres;
        }
        for (i = 0; i < n; i++) {
-               void *gtt_map, *vram_map;
-               void **gtt_start, **gtt_end;
+               void *sysvm_map, *vram_map;
+               void **sysvm_start, **sysvm_end;
                void **vram_start, **vram_end;
                struct dma_fence *fence = NULL;
 
                r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
-                                    NULL, gtt_obj + i);
+                                    NULL, sysvm_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_lclean;
                }
 
-               r = amdgpu_bo_reserve(gtt_obj[i], false);
+               r = amdgpu_bo_reserve(sysvm_obj[i], false);
                if (unlikely(r != 0))
                        goto out_lclean_unref;
-               r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+               r = amdgpu_bo_pin(sysvm_obj[i], AMDGPU_GEM_DOMAIN_GTT, 
&sysvm_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_lclean_unres;
                }
 
-               r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+               r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
                        goto out_lclean_unpin;
                }
 
-               for (gtt_start = gtt_map, gtt_end = gtt_map + size;
-                    gtt_start < gtt_end;
-                    gtt_start++)
-                       *gtt_start = gtt_start;
+               for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size;
+                    sysvm_start < sysvm_end;
+                    sysvm_start++)
+                       *sysvm_start = sysvm_start;
 
-               amdgpu_bo_kunmap(gtt_obj[i]);
+               amdgpu_bo_kunmap(sysvm_obj[i]);
 
-               r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+               r = amdgpu_copy_buffer(ring, sysvm_addr, vram_addr,
                                       size, NULL, &fence, false, false);
 
                if (r) {
@@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device 
*adev)
                        goto out_lclean_unpin;
                }
 
-               for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+               for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     vram_start < vram_end;
-                    gtt_start++, vram_start++) {
-                       if (*vram_start != gtt_start) {
+                    sysvm_start++, vram_start++) {
+                       if (*vram_start != sysvm_start) {
                                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 
0x%p, "
                                          "expected 0x%p (GTT/VRAM offset "
                                          "0x%16llx/0x%16llx)\n",
-                                         i, *vram_start, gtt_start,
+                                         i, *vram_start, sysvm_start,
                                          (unsigned long long)
-                                         (gtt_addr - adev->mc.gtt_start +
-                                          (void*)gtt_start - gtt_map),
+                                         (sysvm_addr - adev->mc.sysvm_start +
+                                          (void*)sysvm_start - sysvm_map),
                                          (unsigned long long)
                                          (vram_addr - adev->mc.vram_start +
-                                          (void*)gtt_start - gtt_map));
+                                          (void*)sysvm_start - sysvm_map));
                                amdgpu_bo_kunmap(vram_obj);
                                goto out_lclean_unpin;
                        }
@@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
                amdgpu_bo_kunmap(vram_obj);
 
-               r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
+               r = amdgpu_copy_buffer(ring, vram_addr, sysvm_addr,
                                       size, NULL, &fence, false, false);
 
                if (r) {
@@ -171,49 +171,49 @@ static void amdgpu_do_test_moves(struct amdgpu_device 
*adev)
 
                dma_fence_put(fence);
 
-               r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+               r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", 
i);
                        goto out_lclean_unpin;
                }
 
-               for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+               for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
-                    gtt_start < gtt_end;
-                    gtt_start++, vram_start++) {
-                       if (*gtt_start != vram_start) {
+                    sysvm_start < sysvm_end;
+                    sysvm_start++, vram_start++) {
+                       if (*sysvm_start != vram_start) {
                                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 
0x%p, "
                                          "expected 0x%p (VRAM/GTT offset "
                                          "0x%16llx/0x%16llx)\n",
-                                         i, *gtt_start, vram_start,
+                                         i, *sysvm_start, vram_start,
                                          (unsigned long long)
                                          (vram_addr - adev->mc.vram_start +
                                           (void*)vram_start - vram_map),
                                          (unsigned long long)
-                                         (gtt_addr - adev->mc.gtt_start +
+                                         (sysvm_addr - adev->mc.sysvm_start +
                                           (void*)vram_start - vram_map));
-                               amdgpu_bo_kunmap(gtt_obj[i]);
+                               amdgpu_bo_kunmap(sysvm_obj[i]);
                                goto out_lclean_unpin;
                        }
                }
 
-               amdgpu_bo_kunmap(gtt_obj[i]);
+               amdgpu_bo_kunmap(sysvm_obj[i]);
 
                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 
0x%llx\n",
-                        gtt_addr - adev->mc.gtt_start);
+                        sysvm_addr - adev->mc.sysvm_start);
                continue;
 
 out_lclean_unpin:
-               amdgpu_bo_unpin(gtt_obj[i]);
+               amdgpu_bo_unpin(sysvm_obj[i]);
 out_lclean_unres:
-               amdgpu_bo_unreserve(gtt_obj[i]);
+               amdgpu_bo_unreserve(sysvm_obj[i]);
 out_lclean_unref:
-               amdgpu_bo_unref(&gtt_obj[i]);
+               amdgpu_bo_unref(&sysvm_obj[i]);
 out_lclean:
                for (--i; i >= 0; --i) {
-                       amdgpu_bo_unpin(gtt_obj[i]);
-                       amdgpu_bo_unreserve(gtt_obj[i]);
-                       amdgpu_bo_unref(&gtt_obj[i]);
+                       amdgpu_bo_unpin(sysvm_obj[i]);
+                       amdgpu_bo_unreserve(sysvm_obj[i]);
+                       amdgpu_bo_unref(&sysvm_obj[i]);
                }
                if (fence)
                        dma_fence_put(fence);
@@ -226,7 +226,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 out_unref:
        amdgpu_bo_unref(&vram_obj);
 out_cleanup:
-       kfree(gtt_obj);
+       kfree(sysvm_obj);
        if (r) {
                pr_warn("Error while testing BO move\n");
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5c7a6c5..9240357 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -102,7 +102,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device 
*adev)
                goto error_bo;
        }
 
-       mutex_init(&adev->mman.gtt_window_lock);
+       mutex_init(&adev->mman.sysvm_window_lock);
 
        ring = adev->mman.buffer_funcs_ring;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
@@ -130,7 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device 
*adev)
        if (adev->mman.mem_global_referenced) {
                amd_sched_entity_fini(adev->mman.entity.sched,
                                      &adev->mman.entity);
-               mutex_destroy(&adev->mman.gtt_window_lock);
+               mutex_destroy(&adev->mman.sysvm_window_lock);
                drm_global_item_unref(&adev->mman.bo_global_ref.ref);
                drm_global_item_unref(&adev->mman.mem_global_ref);
                adev->mman.mem_global_referenced = false;
@@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
                break;
        case TTM_PL_TT:
                man->func = &amdgpu_gtt_mgr_func;
-               man->gpu_offset = adev->mc.gtt_start;
+               man->gpu_offset = adev->mc.sysvm_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -289,7 +289,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
 
        num_pages = new_mem->num_pages;
-       mutex_lock(&adev->mman.gtt_window_lock);
+       mutex_lock(&adev->mman.sysvm_window_lock);
        while (num_pages) {
                unsigned long cur_pages = min(min(old_size, new_size),
                                              
(u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
@@ -341,14 +341,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                        new_start += cur_pages * PAGE_SIZE;
                }
        }
-       mutex_unlock(&adev->mman.gtt_window_lock);
+       mutex_unlock(&adev->mman.sysvm_window_lock);
 
        r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
        dma_fence_put(fence);
        return r;
 
 error:
-       mutex_unlock(&adev->mman.gtt_window_lock);
+       mutex_unlock(&adev->mman.sysvm_window_lock);
 
        if (fence)
                dma_fence_wait(fence, false);
@@ -693,10 +693,10 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct 
ttm_mem_reg *mem)
        uint64_t flags;
        int r;
 
-       spin_lock(&gtt->adev->gtt_list_lock);
+       spin_lock(&gtt->adev->sysvm_list_lock);
        flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
        gtt->offset = (u64)mem->start << PAGE_SHIFT;
-       r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+       r = amdgpu_sysvm_bind(gtt->adev, gtt->offset, ttm->num_pages,
                ttm->pages, gtt->ttm.dma_address, flags);
 
        if (r) {
@@ -705,9 +705,9 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct 
ttm_mem_reg *mem)
                goto error_gart_bind;
        }
 
-       list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+       list_add_tail(&gtt->list, &gtt->adev->sysvm_list);
 error_gart_bind:
-       spin_unlock(&gtt->adev->gtt_list_lock);
+       spin_unlock(&gtt->adev->sysvm_list_lock);
        return r;
 
 }
@@ -774,20 +774,20 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
        int r;
 
        bo_mem.mem_type = TTM_PL_TT;
-       spin_lock(&adev->gtt_list_lock);
-       list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+       spin_lock(&adev->sysvm_list_lock);
+       list_for_each_entry_safe(gtt, tmp, &adev->sysvm_list, list) {
                flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, 
&bo_mem);
-               r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+               r = amdgpu_sysvm_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
                                     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
                                     flags);
                if (r) {
-                       spin_unlock(&adev->gtt_list_lock);
+                       spin_unlock(&adev->sysvm_list_lock);
                        DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
                                  gtt->ttm.ttm.num_pages, gtt->offset);
                        return r;
                }
        }
-       spin_unlock(&adev->gtt_list_lock);
+       spin_unlock(&adev->sysvm_list_lock);
        return 0;
 }
 
@@ -803,8 +803,8 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
                return 0;
 
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-       spin_lock(&gtt->adev->gtt_list_lock);
-       r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
+       spin_lock(&gtt->adev->sysvm_list_lock);
+       r = amdgpu_sysvm_unbind(gtt->adev, gtt->offset, ttm->num_pages);
        if (r) {
                DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
                          gtt->ttm.ttm.num_pages, gtt->offset);
@@ -812,7 +812,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
        }
        list_del_init(&gtt->list);
 error_unbind:
-       spin_unlock(&gtt->adev->gtt_list_lock);
+       spin_unlock(&gtt->adev->sysvm_list_lock);
        return r;
 }
 
@@ -1037,7 +1037,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device 
*adev, struct ttm_tt *ttm,
                        flags |= AMDGPU_PTE_SNOOPED;
        }
 
-       flags |= adev->gart.gart_pte_flags;
+       flags |= adev->sysvm.sysvm_pte_flags;
        flags |= AMDGPU_PTE_READABLE;
 
        if (!amdgpu_ttm_tt_is_readonly(ttm))
@@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
                 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
-                               adev->mc.gtt_size >> PAGE_SHIFT);
+                               adev->mc.sysvm_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
-                (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+                (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
 
        adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
        adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << 
AMDGPU_GDS_SHIFT;
@@ -1223,7 +1223,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
        if (adev->gds.oa.total_size)
                ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
        ttm_bo_device_release(&adev->mman.bdev);
-       amdgpu_gart_fini(adev);
+       amdgpu_sysvm_fini(adev);
        amdgpu_ttm_global_fini(adev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
@@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
-       *addr = adev->mc.gtt_start;
+       *addr = adev->mc.sysvm_start;
        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                AMDGPU_GPU_PAGE_SIZE;
 
@@ -1296,7 +1296,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
        src_addr = num_dw * 4;
        src_addr += job->ibs[0].gpu_addr;
 
-       dst_addr = adev->gart.table_addr;
+       dst_addr = adev->sysvm.table_addr;
        dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
                                dst_addr, num_bytes);
@@ -1306,7 +1306,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 
        dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
        flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-       r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+       r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
                            &job->ibs[0].ptr[num_dw]);
        if (r)
                goto error_free;
@@ -1522,7 +1522,7 @@ static int ttm_pl_tt = TTM_PL_TT;
 
 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
        {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
-       {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
+       {"amdgpu_sysvm_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
        {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
 #ifdef CONFIG_SWIOTLB
        {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
@@ -1574,9 +1574,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
        .llseek = default_llseek
 };
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
 
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
+static ssize_t amdgpu_ttm_sysvm_read(struct file *f, char __user *buf,
                                   size_t size, loff_t *pos)
 {
        struct amdgpu_device *adev = file_inode(f)->i_private;
@@ -1590,16 +1590,16 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
                struct page *page;
                void *ptr;
 
-               if (p >= adev->gart.num_cpu_pages)
+               if (p >= adev->sysvm.num_cpu_pages)
                        return result;
 
-               page = adev->gart.pages[p];
+               page = adev->sysvm.pages[p];
                if (page) {
                        ptr = kmap(page);
                        ptr += off;
 
                        r = copy_to_user(buf, ptr, cur_size);
-                       kunmap(adev->gart.pages[p]);
+                       kunmap(adev->sysvm.pages[p]);
                } else
                        r = clear_user(buf, cur_size);
 
@@ -1615,9 +1615,9 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
        return result;
 }
 
-static const struct file_operations amdgpu_ttm_gtt_fops = {
+static const struct file_operations amdgpu_ttm_sysvm_fops = {
        .owner = THIS_MODULE,
-       .read = amdgpu_ttm_gtt_read,
+       .read = amdgpu_ttm_sysvm_read,
        .llseek = default_llseek
 };
 
@@ -1640,12 +1640,12 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
        i_size_write(ent->d_inode, adev->mc.mc_vram_size);
        adev->mman.vram = ent;
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
-                                 adev, &amdgpu_ttm_gtt_fops);
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+       ent = debugfs_create_file("amdgpu_sysvm", S_IFREG | S_IRUGO, root,
+                                 adev, &amdgpu_ttm_sysvm_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);
-       i_size_write(ent->d_inode, adev->mc.gtt_size);
+       i_size_write(ent->d_inode, adev->mc.sysvm_size);
        adev->mman.gtt = ent;
 
 #endif
@@ -1670,7 +1670,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
        debugfs_remove(adev->mman.vram);
        adev->mman.vram = NULL;
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
        debugfs_remove(adev->mman.gtt);
        adev->mman.gtt = NULL;
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 4f5c1da..1443038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -53,7 +53,7 @@ struct amdgpu_mman {
        const struct amdgpu_buffer_funcs        *buffer_funcs;
        struct amdgpu_ring                      *buffer_funcs_ring;
 
-       struct mutex                            gtt_window_lock;
+       struct mutex                            sysvm_window_lock;
        /* Scheduler entity for buffer moves */
        struct amd_sched_entity                 entity;
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1d1810d..8dbacec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -982,14 +982,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
                value = params->pages_addr ?
                        amdgpu_vm_map_gart(params->pages_addr, addr) :
                        addr;
-               amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+               amdgpu_sysvm_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
                                        i, value, flags);
                addr += incr;
        }
 
        /* Flush HDP */
        mb();
-       amdgpu_gart_flush_gpu_tlb(params->adev, 0);
+       amdgpu_sysvm_flush_gpu_tlb(params->adev, 0);
 }
 
 static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
@@ -1101,7 +1101,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                }
 
                pt = amdgpu_bo_gpu_offset(bo);
-               pt = amdgpu_gart_get_vm_pde(adev, pt);
+               pt = amdgpu_sysvm_get_vm_pde(adev, pt);
                if (parent->entries[pt_idx].addr == pt)
                        continue;
 
@@ -1564,7 +1564,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
+ * @sysvm_flags: flags as they are used in the SYSVM
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct dma_fence *exclusive,
-                                     uint64_t gtt_flags,
+                                     uint64_t sysvm_flags,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
@@ -1633,8 +1633,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                }
 
                if (pages_addr) {
-                       if (flags == gtt_flags)
-                               src = adev->gart.table_addr +
+                       if (flags == sysvm_flags)
+                               src = adev->sysvm.table_addr +
                                        (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
                        else
                                max_entries = min(max_entries, 16ull * 1024ull);
@@ -1681,7 +1681,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        struct amdgpu_vm *vm = bo_va->vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
-       uint64_t gtt_flags, flags;
+       uint64_t sysvm_flags, flags;
        struct ttm_mem_reg *mem;
        struct drm_mm_node *nodes;
        struct dma_fence *exclusive;
@@ -1706,12 +1706,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
        if (bo_va->bo) {
                flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-               gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+               sysvm_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
                        adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
                        flags : 0;
        } else {
                flags = 0x0;
-               gtt_flags = ~0x0;
+               sysvm_flags = ~0x0;
        }
 
        spin_lock(&vm->status_lock);
@@ -1721,7 +1721,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
        list_for_each_entry(mapping, &bo_va->invalids, list) {
                r = amdgpu_vm_bo_split_mapping(adev, exclusive,
-                                              gtt_flags, pages_addr, vm,
+                                              sysvm_flags, pages_addr, vm,
                                               mapping, flags, nodes,
                                               &bo_va->last_pt_update);
                if (r)
@@ -1756,7 +1756,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
        spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
        enable = !!atomic_read(&adev->vm_manager.num_prt_users);
-       adev->gart.gart_funcs->set_prt(adev, enable);
+       adev->sysvm.sysvm_funcs->set_prt(adev, enable);
        spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
@@ -1765,7 +1765,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
-       if (!adev->gart.gart_funcs->set_prt)
+       if (!adev->sysvm.sysvm_funcs->set_prt)
                return;
 
        if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1800,7 +1800,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
 {
        struct amdgpu_prt_cb *cb;
 
-       if (!adev->gart.gart_funcs->set_prt)
+       if (!adev->sysvm.sysvm_funcs->set_prt)
                return;
 
        cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -2498,7 +2498,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        struct amdgpu_bo_va_mapping *mapping, *tmp;
-       bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+       bool prt_fini_needed = !!adev->sysvm.sysvm_funcs->set_prt;
        int i;
 
        amd_sched_entity_fini(vm->entity.sched, &vm->entity);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6986285..708fb84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3731,10 +3731,10 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
 
        gfx_v9_0_write_data_to_reg(ring, usepfp, true,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index a42f483..1290434 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -40,8 +40,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
        uint64_t value;
 
-       BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-       value = adev->gart.table_addr - adev->mc.vram_start
+       BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
+       value = adev->sysvm.table_addr - adev->mc.vram_start
                + adev->vm_manager.vram_base_offset;
        value &= 0x0000FFFFFFFFF000ULL;
        value |= 0x1; /*valid bit*/
@@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
        gfxhub_v1_0_init_gart_pt_regs(adev);
 
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-                    (u32)(adev->mc.gtt_start >> 12));
+                    (u32)(adev->mc.sysvm_start >> 12));
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-                    (u32)(adev->mc.gtt_start >> 44));
+                    (u32)(adev->mc.sysvm_start >> 44));
 
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-                    (u32)(adev->mc.gtt_end >> 12));
+                    (u32)(adev->mc.sysvm_end >> 12));
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-                    (u32)(adev->mc.gtt_end >> 44));
+                    (u32)(adev->mc.sysvm_end >> 44));
 }
 
 static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -228,7 +228,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
        }
 }
 
-int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
+int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
 {
        if (amdgpu_sriov_vf(adev)) {
                /*
@@ -256,7 +256,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
        return 0;
 }
 
-void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
+void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
 {
        u32 tmp;
        u32 i;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index d2dbb08..d194b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -24,8 +24,8 @@
 #ifndef __GFXHUB_V1_0_H__
 #define __GFXHUB_V1_0_H__
 
-int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
+int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
+void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
 void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value);
 void gfxhub_v1_0_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5ed6788f..53c3b8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -36,7 +36,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "si_enums.h"
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
 
@@ -216,7 +216,7 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v6_0_vram_sysvm_location(struct amdgpu_device *adev,
                                       struct amdgpu_mc *mc)
 {
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -228,8 +228,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
                mc->mc_vram_size = 0xFFC0000000ULL;
        }
        amdgpu_vram_location(adev, &adev->mc, base);
-       adev->mc.gtt_base_align = 0;
-       amdgpu_gtt_location(adev, mc);
+       adev->mc.sysvm_base_align = 0;
+       amdgpu_sysvm_location(adev, mc);
 }
 
 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -321,8 +321,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
        adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        adev->mc.visible_vram_size = adev->mc.aper_size;
 
-       amdgpu_gart_set_defaults(adev);
-       gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+       amdgpu_sysvm_set_defaults(adev);
+       gmc_v6_0_vram_sysvm_location(adev, &adev->mc);
 
        return 0;
 }
@@ -447,15 +447,15 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
        }
 }
 
-static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v6_0_sysvm_enable(struct amdgpu_device *adev)
 {
        int r, i;
 
-       if (adev->gart.robj == NULL) {
+       if (adev->sysvm.robj == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gart_table_vram_pin(adev);
+       r = amdgpu_sysvm_table_vram_pin(adev);
        if (r)
                return r;
        /* Setup TLB control */
@@ -482,9 +482,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
               (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
               (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
        /* setup context0 */
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(adev->dummy_page.addr >> 12));
        WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -508,10 +508,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-                              adev->gart.table_addr >> 12);
+                              adev->sysvm.table_addr >> 12);
                else
                        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-                              adev->gart.table_addr >> 12);
+                              adev->sysvm.table_addr >> 12);
        }
 
        /* enable context1-15 */
@@ -530,9 +530,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->mc.gtt_size >> 20),
-                (unsigned long long)adev->gart.table_addr);
-       adev->gart.ready = true;
+                (unsigned)(adev->mc.sysvm_size >> 20),
+                (unsigned long long)adev->sysvm.table_addr);
+       adev->sysvm.ready = true;
        return 0;
 }
 
@@ -540,19 +540,19 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->gart.robj) {
+       if (adev->sysvm.robj) {
                dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
                return 0;
        }
-       r = amdgpu_gart_init(adev);
+       r = amdgpu_sysvm_init(adev);
        if (r)
                return r;
-       adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-       adev->gart.gart_pte_flags = 0;
-       return amdgpu_gart_table_vram_alloc(adev);
+       adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+       adev->sysvm.sysvm_pte_flags = 0;
+       return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
-static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v6_0_sysvm_disable(struct amdgpu_device *adev)
 {
        /*unsigned i;
 
@@ -582,13 +582,13 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
        WREG32(mmVM_L2_CNTL3,
               VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
               (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
-       amdgpu_gart_table_vram_unpin(adev);
+       amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
 {
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
+       amdgpu_sysvm_table_vram_free(adev);
+       amdgpu_sysvm_fini(adev);
 }
 
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
@@ -762,7 +762,7 @@ static int gmc_v6_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v6_0_set_gart_funcs(adev);
+       gmc_v6_0_set_sysvm_funcs(adev);
        gmc_v6_0_set_irq_funcs(adev);
 
        return 0;
@@ -889,7 +889,7 @@ static int gmc_v6_0_hw_init(void *handle)
                }
        }
 
-       r = gmc_v6_0_gart_enable(adev);
+       r = gmc_v6_0_sysvm_enable(adev);
        if (r)
                return r;
 
@@ -901,7 +901,7 @@ static int gmc_v6_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-       gmc_v6_0_gart_disable(adev);
+       gmc_v6_0_sysvm_disable(adev);
 
        return 0;
 }
@@ -1095,7 +1095,7 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
        .set_powergating_state = gmc_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v6_0_sysvm_funcs = {
        .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
        .set_prt = gmc_v6_0_set_prt,
@@ -1108,10 +1108,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
        .process = gmc_v6_0_process_interrupt,
 };
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+       if (adev->sysvm.sysvm_funcs == NULL)
+               adev->sysvm.sysvm_funcs = &gmc_v6_0_sysvm_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 15f2c0f..2329bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -39,7 +39,7 @@
 
 #include "amdgpu_atombios.h"
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
 
@@ -231,7 +231,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v7_0_vram_sysvm_location(struct amdgpu_device *adev,
                                       struct amdgpu_mc *mc)
 {
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -244,8 +244,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
                mc->mc_vram_size = 0xFFC0000000ULL;
        }
        amdgpu_vram_location(adev, &adev->mc, base);
-       adev->mc.gtt_base_align = 0;
-       amdgpu_gtt_location(adev, mc);
+       adev->mc.sysvm_base_align = 0;
+       amdgpu_sysvm_location(adev, mc);
 }
 
 /**
@@ -373,8 +373,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
        if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
                adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-       amdgpu_gart_set_defaults(adev);
-       gmc_v7_0_vram_gtt_location(adev, &adev->mc);
+       amdgpu_sysvm_set_defaults(adev);
+       gmc_v7_0_vram_sysvm_location(adev, &adev->mc);
 
        return 0;
 }
@@ -536,7 +536,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 }
 
 /**
- * gmc_v7_0_gart_enable - gart enable
+ * gmc_v7_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  *
@@ -546,16 +546,16 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
  * and GPUVM for FSA64 clients (CIK).
  * Returns 0 for success, errors for failure.
  */
-static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v7_0_sysvm_enable(struct amdgpu_device *adev)
 {
        int r, i;
        u32 tmp;
 
-       if (adev->gart.robj == NULL) {
+       if (adev->sysvm.robj == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gart_table_vram_pin(adev);
+       r = amdgpu_sysvm_table_vram_pin(adev);
        if (r)
                return r;
        /* Setup TLB control */
@@ -585,9 +585,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
        WREG32(mmVM_L2_CNTL3, tmp);
        /* setup context0 */
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(adev->dummy_page.addr >> 12));
        WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -611,10 +611,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-                              adev->gart.table_addr >> 12);
+                              adev->sysvm.table_addr >> 12);
                else
                        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-                              adev->gart.table_addr >> 12);
+                              adev->sysvm.table_addr >> 12);
        }
 
        /* enable context1-15 */
@@ -640,9 +640,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->mc.gtt_size >> 20),
-                (unsigned long long)adev->gart.table_addr);
-       adev->gart.ready = true;
+                (unsigned)(adev->mc.sysvm_size >> 20),
+                (unsigned long long)adev->sysvm.table_addr);
+       adev->sysvm.ready = true;
        return 0;
 }
 
@@ -650,27 +650,27 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->gart.robj) {
+       if (adev->sysvm.robj) {
                WARN(1, "R600 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
-       r = amdgpu_gart_init(adev);
+       r = amdgpu_sysvm_init(adev);
        if (r)
                return r;
-       adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-       adev->gart.gart_pte_flags = 0;
-       return amdgpu_gart_table_vram_alloc(adev);
+       adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+       adev->sysvm.sysvm_pte_flags = 0;
+       return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 /**
- * gmc_v7_0_gart_disable - gart disable
+ * gmc_v7_0_sysvm_disable - gart disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table (CIK).
  */
-static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v7_0_sysvm_disable(struct amdgpu_device *adev)
 {
        u32 tmp;
 
@@ -688,7 +688,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
        WREG32(mmVM_L2_CNTL, tmp);
        WREG32(mmVM_L2_CNTL2, 0);
-       amdgpu_gart_table_vram_unpin(adev);
+       amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 /**
@@ -700,8 +700,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
  */
 static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
 {
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
+       amdgpu_sysvm_table_vram_free(adev);
+       amdgpu_sysvm_fini(adev);
 }
 
 /**
@@ -885,7 +885,7 @@ static int gmc_v7_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v7_0_set_gart_funcs(adev);
+       gmc_v7_0_set_sysvm_funcs(adev);
        gmc_v7_0_set_irq_funcs(adev);
 
        adev->mc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1036,7 +1036,7 @@ static int gmc_v7_0_hw_init(void *handle)
                }
        }
 
-       r = gmc_v7_0_gart_enable(adev);
+       r = gmc_v7_0_sysvm_enable(adev);
        if (r)
                return r;
 
@@ -1048,7 +1048,7 @@ static int gmc_v7_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-       gmc_v7_0_gart_disable(adev);
+       gmc_v7_0_sysvm_disable(adev);
 
        return 0;
 }
@@ -1270,7 +1270,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
        .set_powergating_state = gmc_v7_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v7_0_sysvm_funcs = {
        .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
        .set_prt = gmc_v7_0_set_prt,
@@ -1283,10 +1283,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
        .process = gmc_v7_0_process_interrupt,
 };
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+       if (adev->sysvm.sysvm_funcs == NULL)
+               adev->sysvm.sysvm_funcs = &gmc_v7_0_sysvm_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 213af65..cf8f8d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -41,7 +41,7 @@
 #include "amdgpu_atombios.h"
 
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
 
@@ -393,7 +393,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v8_0_vram_sysvm_location(struct amdgpu_device *adev,
                                       struct amdgpu_mc *mc)
 {
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -406,8 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
                mc->mc_vram_size = 0xFFC0000000ULL;
        }
        amdgpu_vram_location(adev, &adev->mc, base);
-       adev->mc.gtt_base_align = 0;
-       amdgpu_gtt_location(adev, mc);
+       adev->mc.sysvm_base_align = 0;
+       amdgpu_sysvm_location(adev, mc);
 }
 
 /**
@@ -535,8 +535,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
        if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
                adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-       amdgpu_gart_set_defaults(adev);
-       gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+       amdgpu_sysvm_set_defaults(adev);
+       gmc_v8_0_vram_sysvm_location(adev, &adev->mc);
 
        return 0;
 }
@@ -722,7 +722,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 }
 
 /**
- * gmc_v8_0_gart_enable - gart enable
+ * gmc_v8_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  *
@@ -732,16 +732,16 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
  * and GPUVM for FSA64 clients (CIK).
  * Returns 0 for success, errors for failure.
  */
-static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v8_0_sysvm_enable(struct amdgpu_device *adev)
 {
        int r, i;
        u32 tmp;
 
-       if (adev->gart.robj == NULL) {
+       if (adev->sysvm.robj == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gart_table_vram_pin(adev);
+       r = amdgpu_sysvm_table_vram_pin(adev);
        if (r)
                return r;
        /* Setup TLB control */
@@ -787,9 +787,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
        WREG32(mmVM_L2_CNTL4, tmp);
        /* setup context0 */
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(adev->dummy_page.addr >> 12));
        WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -813,10 +813,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device 
*adev)
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-                              adev->gart.table_addr >> 12);
+                              adev->sysvm.table_addr >> 12);
                else
                        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-                              adev->gart.table_addr >> 12);
+                              adev->sysvm.table_addr >> 12);
        }
 
        /* enable context1-15 */
@@ -843,9 +843,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->mc.gtt_size >> 20),
-                (unsigned long long)adev->gart.table_addr);
-       adev->gart.ready = true;
+                (unsigned)(adev->mc.sysvm_size >> 20),
+                (unsigned long long)adev->sysvm.table_addr);
+       adev->sysvm.ready = true;
        return 0;
 }
 
@@ -853,27 +853,27 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->gart.robj) {
+       if (adev->sysvm.robj) {
                WARN(1, "R600 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
-       r = amdgpu_gart_init(adev);
+       r = amdgpu_sysvm_init(adev);
        if (r)
                return r;
-       adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-       adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
-       return amdgpu_gart_table_vram_alloc(adev);
+       adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+       adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_EXECUTABLE;
+       return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 /**
- * gmc_v8_0_gart_disable - gart disable
+ * gmc_v8_0_sysvm_disable - sysvm disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table (CIK).
  */
-static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v8_0_sysvm_disable(struct amdgpu_device *adev)
 {
        u32 tmp;
 
@@ -891,7 +891,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device 
*adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
        WREG32(mmVM_L2_CNTL, tmp);
        WREG32(mmVM_L2_CNTL2, 0);
-       amdgpu_gart_table_vram_unpin(adev);
+       amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 /**
@@ -903,8 +903,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device 
*adev)
  */
 static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
 {
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
+       amdgpu_sysvm_table_vram_free(adev);
+       amdgpu_sysvm_fini(adev);
 }
 
 /**
@@ -962,7 +962,7 @@ static int gmc_v8_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v8_0_set_gart_funcs(adev);
+       gmc_v8_0_set_sysvm_funcs(adev);
        gmc_v8_0_set_irq_funcs(adev);
 
        adev->mc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1128,7 +1128,7 @@ static int gmc_v8_0_hw_init(void *handle)
                }
        }
 
-       r = gmc_v8_0_gart_enable(adev);
+       r = gmc_v8_0_sysvm_enable(adev);
        if (r)
                return r;
 
@@ -1140,7 +1140,7 @@ static int gmc_v8_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-       gmc_v8_0_gart_disable(adev);
+       gmc_v8_0_sysvm_disable(adev);
 
        return 0;
 }
@@ -1590,7 +1590,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
        .get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v8_0_sysvm_funcs = {
        .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
        .set_prt = gmc_v8_0_set_prt,
@@ -1603,10 +1603,10 @@ static const struct amdgpu_irq_src_funcs 
gmc_v8_0_irq_funcs = {
        .process = gmc_v8_0_process_interrupt,
 };
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+       if (adev->sysvm.sysvm_funcs == NULL)
+               adev->sysvm.sysvm_funcs = &gmc_v8_0_sysvm_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index dbb43d9..f067465 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -365,7 +365,7 @@ static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, 
u64 addr)
        return addr;
 }
 
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v9_0_sysvm_funcs = {
        .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
        .get_invalidate_req = gmc_v9_0_get_invalidate_req,
@@ -373,17 +373,17 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs 
= {
        .get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-       if (adev->gart.gart_funcs == NULL)
-               adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+       if (adev->sysvm.sysvm_funcs == NULL)
+               adev->sysvm.sysvm_funcs = &gmc_v9_0_sysvm_funcs;
 }
 
 static int gmc_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v9_0_set_gart_funcs(adev);
+       gmc_v9_0_set_sysvm_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);
 
        return 0;
@@ -412,15 +412,15 @@ static int gmc_v9_0_late_init(void *handle)
        return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 }
 
-static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v9_0_vram_sysvm_location(struct amdgpu_device *adev,
                                        struct amdgpu_mc *mc)
 {
        u64 base = 0;
        if (!amdgpu_sriov_vf(adev))
                base = mmhub_v1_0_get_fb_location(adev);
        amdgpu_vram_location(adev, &adev->mc, base);
-       adev->mc.gtt_base_align = 0;
-       amdgpu_gtt_location(adev, mc);
+       adev->mc.sysvm_base_align = 0;
+       amdgpu_sysvm_location(adev, mc);
        /* base offset of vram pages */
        if (adev->flags & AMD_IS_APU)
                adev->vm_manager.vram_base_offset = 
gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -494,8 +494,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
        if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
                adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-       amdgpu_gart_set_defaults(adev);
-       gmc_v9_0_vram_gtt_location(adev, &adev->mc);
+       amdgpu_sysvm_set_defaults(adev);
+       gmc_v9_0_vram_sysvm_location(adev, &adev->mc);
 
        return 0;
 }
@@ -504,18 +504,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->gart.robj) {
+       if (adev->sysvm.robj) {
                WARN(1, "VEGA10 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
-       r = amdgpu_gart_init(adev);
+       r = amdgpu_sysvm_init(adev);
        if (r)
                return r;
-       adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-       adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
+       adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+       adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
                                 AMDGPU_PTE_EXECUTABLE;
-       return amdgpu_gart_table_vram_alloc(adev);
+       return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 static int gmc_v9_0_sw_init(void *handle)
@@ -640,8 +640,8 @@ static int gmc_v9_0_sw_init(void *handle)
  */
 static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
 {
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
+       amdgpu_sysvm_table_vram_free(adev);
+       amdgpu_sysvm_fini(adev);
 }
 
 static int gmc_v9_0_sw_fini(void *handle)
@@ -669,11 +669,11 @@ static void gmc_v9_0_init_golden_registers(struct 
amdgpu_device *adev)
 }
 
 /**
- * gmc_v9_0_gart_enable - gart enable
+ * gmc_v9_0_sysvm_enable - sysvm enable
  *
  * @adev: amdgpu_device pointer
  */
-static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v9_0_sysvm_enable(struct amdgpu_device *adev)
 {
        int r;
        bool value;
@@ -683,11 +683,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device 
*adev)
                golden_settings_vega10_hdp,
                (const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
 
-       if (adev->gart.robj == NULL) {
+       if (adev->sysvm.robj == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gart_table_vram_pin(adev);
+       r = amdgpu_sysvm_table_vram_pin(adev);
        if (r)
                return r;
 
@@ -706,11 +706,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device 
*adev)
                break;
        }
 
-       r = gfxhub_v1_0_gart_enable(adev);
+       r = gfxhub_v1_0_sysvm_enable(adev);
        if (r)
                return r;
 
-       r = mmhub_v1_0_gart_enable(adev);
+       r = mmhub_v1_0_sysvm_enable(adev);
        if (r)
                return r;
 
@@ -733,9 +733,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
        gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
 
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->mc.gtt_size >> 20),
-                (unsigned long long)adev->gart.table_addr);
-       adev->gart.ready = true;
+                (unsigned)(adev->mc.sysvm_size >> 20),
+                (unsigned long long)adev->sysvm.table_addr);
+       adev->sysvm.ready = true;
        return 0;
 }
 
@@ -747,23 +747,23 @@ static int gmc_v9_0_hw_init(void *handle)
        /* The sequence of these two function calls matters.*/
        gmc_v9_0_init_golden_registers(adev);
 
-       r = gmc_v9_0_gart_enable(adev);
+       r = gmc_v9_0_sysvm_enable(adev);
 
        return r;
 }
 
 /**
- * gmc_v9_0_gart_disable - gart disable
+ * gmc_v9_0_sysvm_disable - sysvm disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table.
  */
-static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v9_0_sysvm_disable(struct amdgpu_device *adev)
 {
-       gfxhub_v1_0_gart_disable(adev);
-       mmhub_v1_0_gart_disable(adev);
-       amdgpu_gart_table_vram_unpin(adev);
+       gfxhub_v1_0_sysvm_disable(adev);
+       mmhub_v1_0_sysvm_disable(adev);
+       amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 static int gmc_v9_0_hw_fini(void *handle)
@@ -777,7 +777,7 @@ static int gmc_v9_0_hw_fini(void *handle)
        }
 
        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-       gmc_v9_0_gart_disable(adev);
+       gmc_v9_0_sysvm_disable(adev);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 9804318..fbc8f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -51,8 +51,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device 
*adev)
 {
        uint64_t value;
 
-       BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-       value = adev->gart.table_addr - adev->mc.vram_start +
+       BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
+       value = adev->sysvm.table_addr - adev->mc.vram_start +
                adev->vm_manager.vram_base_offset;
        value &= 0x0000FFFFFFFFF000ULL;
        value |= 0x1; /* valid bit */
@@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct 
amdgpu_device *adev)
        mmhub_v1_0_init_gart_pt_regs(adev);
 
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-                    (u32)(adev->mc.gtt_start >> 12));
+                    (u32)(adev->mc.sysvm_start >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-                    (u32)(adev->mc.gtt_start >> 44));
+                    (u32)(adev->mc.sysvm_start >> 44));
 
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-                    (u32)(adev->mc.gtt_end >> 12));
+                    (u32)(adev->mc.sysvm_end >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-                    (u32)(adev->mc.gtt_end >> 44));
+                    (u32)(adev->mc.sysvm_end >> 44));
 }
 
 static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -462,7 +462,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device 
*adev,
        }
 }
 
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
 {
        if (amdgpu_sriov_vf(adev)) {
                /*
@@ -490,7 +490,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
        return 0;
 }
 
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
 {
        u32 tmp;
        u32 i;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 57bb940..23128e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -24,8 +24,8 @@
 #define __MMHUB_V1_0_H__
 
 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
+int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
+void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
 void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
                                         bool value);
 void mmhub_v1_0_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4a65697..056b169 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1121,10 +1121,10 @@ static void sdma_v4_0_ring_emit_vm_flush(struct 
amdgpu_ring *ring,
                                         unsigned vm_id, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 987b958..95913fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1312,11 +1312,11 @@ static void uvd_v7_0_ring_emit_vm_flush(struct 
amdgpu_ring *ring,
                                        unsigned vm_id, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
        uint32_t data0, data1, mask;
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
 
        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
@@ -1353,10 +1353,10 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct 
amdgpu_ring *ring,
                         unsigned int vm_id, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb..b869423 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -952,10 +952,10 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring 
*ring,
                         unsigned int vm_id, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21e7b88..2ca49af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -878,11 +878,11 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct 
amdgpu_ring *ring,
                                        unsigned vm_id, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
        uint32_t data0, data1, mask;
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
 
        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
@@ -1010,10 +1010,10 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct 
amdgpu_ring *ring,
                         unsigned int vm_id, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to