drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h                         |   17 
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c                   |  251 
++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c                         |   10 
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c                            |   13 
 drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c                     |    6 
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c                            |   16 
 drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c                            |   12 
 drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h                            |    3 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c                            |   88 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h                            |    3 
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.h                            |    2 
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c                         |   40 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h                         |    2 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c                            |   24 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h                            |    6 
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c                           |    1 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c                             |   79 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h                             |    6 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c                        |    2 
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c                              |    4 
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c                             |    3 
 drivers/gpu/drm/amd/amdgpu/soc21.c                                 |    1 
 drivers/gpu/drm/amd/amdkfd/kfd_process.c                           |   24 
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c                  |    1 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c                |    8 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c          |    2 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c    |    4 
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c                |    8 
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c                |    8 
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c                |    8 
 drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h                   |    2 
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h |    2 
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h                       |    3 
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h                       |    4 
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c                     |   17 
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c               |  113 ++++
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c               |   42 +
 drivers/gpu/drm/i915/display/g4x_dp.c                              |    4 
 drivers/gpu/drm/i915/display/g4x_hdmi.c                            |   25 
 drivers/gpu/drm/i915/display/intel_dp.c                            |   59 --
 drivers/gpu/drm/i915/gem/i915_gem_object.c                         |    3 
 drivers/gpu/drm/i915/gem/i915_gem_object_types.h                   |   10 
 drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c                         |   18 
 drivers/gpu/drm/i915/gt/intel_gt_mcr.c                             |    2 
 drivers/gpu/drm/i915/gt/intel_migrate.c                            |   53 +-
 drivers/gpu/drm/i915/i915_perf.c                                   |    6 
 drivers/gpu/drm/i915/intel_uncore.c                                |    4 
 47 files changed, 647 insertions(+), 372 deletions(-)

New commits:
commit 991f428b267e86129f40ccac7ff0f81bf622cfb1
Merge: a6f2f56ccb44 fe8f5b2f7bec
Author: Kevin Brace <kevinbr...@gmx.com>
Date:   Sat Dec 24 20:40:12 2022 -0600

    Merge tag 'drm-next-2022-12-23' of git://anongit.freedesktop.org/drm/drm 
into drm-next-6.2
    
    drm fixes for 6.2-rc1
    
    amdgpu:
    - Spelling fix
    - BO pin fix
    - Properly handle polaris 10/11 overlap asics
    - GMC9 fix
    - SR-IOV suspend fix
    - DCN 3.1.4 fix
    - KFD userptr locking fix
    - SMU13.x fixes
    - GDS/GWS/OA handling fix
    - Reserved VMID handling fixes
    - FRU EEPROM fix
    - BO validation fixes
    - Avoid large variable on the stack
    - S0ix fixes
    - SMU 13.x fixes
    - VCN fix
    - Add missing fence reference
    
    amdkfd:
    - Fix init vm error handling
    - Fix double release of compute pasid
    
    i915:
    - Documentation fixes
    - OA-perf related fix
    - VLV/CHV HDMI/DP audio fix
    - Display DDI/Transcoder fix
    - Migrate fixes

commit fe8f5b2f7bec504021b395d24f7efca415d21e2b
Merge: 38624d2c972e c1c4a8b21721
Author: Dave Airlie <airl...@redhat.com>
Date:   Thu Dec 22 11:02:55 2022 +1000

    Merge tag 'amd-drm-fixes-6.2-2022-12-21' of 
https://gitlab.freedesktop.org/agd5f/linux into drm-next
    
    amd-drm-fixes-6.2-2022-12-21:
    
    amdgpu:
    - Avoid large variable on the stack
    - S0ix fixes
    - SMU 13.x fixes
    - VCN fix
    - Add missing fence reference
    
    amdkfd:
    - Fix init vm error handling
    - Fix double release of compute pasid
    
    Signed-off-by: Dave Airlie <airl...@redhat.com>
    From: Alex Deucher <alexander.deuc...@amd.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221221205828.6093-1-alexander.deuc...@amd.com

commit c1c4a8b217213c1924eabf4f28385bbee9cc50c1
Author: Christian König <ckoenig.leichtzumer...@gmail.com>
Date:   Mon Dec 19 11:47:18 2022 +0100

    drm/amdgpu: grab extra fence reference for drm_sched_job_add_dependency
    
    That function consumes the reference.
    
    Reviewed-by: Luben Tuikov <luben.tui...@amd.com>
    Reported-by: Borislav Petkov (AMD) <b...@alien8.de>
    Tested-by: Borislav Petkov (AMD) <b...@alien8.de>
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Fixes: aab9cf7b6954 ("drm/amdgpu: use scheduler dependencies for VM 
updates")
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 59cf64216fbb..535cd6569bcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -238,8 +238,10 @@ static int amdgpu_vm_sdma_update(struct 
amdgpu_vm_update_params *p,
        /* Wait for PD/PT moves to be completed */
        dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
+               dma_fence_get(fence);
                r = drm_sched_job_add_dependency(&p->job->base, fence);
                if (r) {
+                       dma_fence_put(fence);
                        dma_resv_iter_end(&cursor);
                        return r;
                }
commit e1d900df63adcb748905131dd6258e570e11aed1
Author: Saleemkhan Jamadar <saleemkhan.jama...@amd.com>
Date:   Tue Dec 20 13:21:44 2022 +0530

    drm/amdgpu: enable VCN DPG for GC IP v11.0.4
    
    Enable VCN Dynamic Power Gating control for GC IP v11.0.4.
    
    Signed-off-by: Saleemkhan Jamadar <saleemkhan.jama...@amd.com>
    Reviewed-by: Veerabadhran Gopalakrishnan 
<veerabadhran.gopalakrish...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0, 6.1

diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c 
b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 7d5fdf450d0c..5562670b7b52 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -666,6 +666,7 @@ static int soc21_common_early_init(void *handle)
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
+                       AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_GFX_PG |
                        AMD_PG_SUPPORT_JPEG;
                adev->external_rev_id = adev->rev_id + 0x1;
commit 8660495a9c5b9afeec4cc006b3b75178f0fb2f10
Author: Tim Huang <tim.hu...@amd.com>
Date:   Mon Dec 19 18:32:32 2022 +0800

    drm/amdgpu: skip mes self test after s0i3 resume for MES IP v11.0
    
    MES is part of gfxoff and MES suspend and resume are skipped for S0i3.
    But the mes_self_test call path is still in the amdgpu_device_ip_late_init.
    It should also be skipped for s0ix as no hardware re-initialization
    happened.
    
    Besides, mes_self_test will free the BO that triggers a lot of warning
    messages while in the suspend state.
    
    [   81.656085] WARNING: CPU: 2 PID: 1550 at 
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c:425 amdgpu_bo_free_kernel+0xfc/0x110 
[amdgpu]
    [   81.679435] Call Trace:
    [   81.679726]  <TASK>
    [   81.679981]  amdgpu_mes_remove_hw_queue+0x17a/0x230 [amdgpu]
    [   81.680857]  amdgpu_mes_self_test+0x390/0x430 [amdgpu]
    [   81.681665]  mes_v11_0_late_init+0x37/0x50 [amdgpu]
    [   81.682423]  amdgpu_device_ip_late_init+0x53/0x280 [amdgpu]
    [   81.683257]  amdgpu_device_resume+0xae/0x2a0 [amdgpu]
    [   81.684043]  amdgpu_pmops_resume+0x37/0x70 [amdgpu]
    [   81.684818]  pci_pm_resume+0x5c/0xa0
    [   81.685247]  ? pci_pm_thaw+0x90/0x90
    [   81.685658]  dpm_run_callback+0x4e/0x160
    [   81.686110]  device_resume+0xad/0x210
    [   81.686529]  async_resume+0x1e/0x40
    [   81.686931]  async_run_entry_fn+0x33/0x120
    [   81.687405]  process_one_work+0x21d/0x3f0
    [   81.687869]  worker_thread+0x4a/0x3c0
    [   81.688293]  ? process_one_work+0x3f0/0x3f0
    [   81.688777]  kthread+0xff/0x130
    [   81.689157]  ? kthread_complete_and_exit+0x20/0x20
    [   81.689707]  ret_from_fork+0x22/0x30
    [   81.690118]  </TASK>
    [   81.690380] ---[ end trace 0000000000000000 ]---
    
    v2: make the comment clean and use adev->in_s0ix instead of
    adev->suspend
    
    Signed-off-by: Tim Huang <tim.hu...@amd.com>
    Reviewed-by: Mario Limonciello <mario.limoncie...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0, 6.1

diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c 
b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 5459366f49ff..970b066b37bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1342,7 +1342,8 @@ static int mes_v11_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!amdgpu_in_reset(adev) &&
+       /* it's only intended for use in mes_self_test case, not for s0ix and 
reset */
+       if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
            (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
                amdgpu_mes_self_test(adev);
 
commit e73fc71e8f015d61f3adca7659cb209fd5117aa5
Author: Evan Quan <evan.q...@amd.com>
Date:   Fri Dec 16 17:04:24 2022 +0800

    drm/amd/pm: correct the fan speed retrieving in PWM for some SMU13 asics
    
    For SMU 13.0.0 and 13.0.7, the output from PMFW is in percent. Driver
    needs to convert that into correct PWM(255) based.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Acked-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0, 6.1

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 713fb6ad39f6..9643b21c636a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1441,12 +1441,23 @@ out:
 static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
                                         uint32_t *speed)
 {
+       int ret;
+
        if (!speed)
                return -EINVAL;
 
-       return smu_v13_0_0_get_smu_metrics_data(smu,
-                                               METRICS_CURR_FANPWM,
-                                               speed);
+       ret = smu_v13_0_0_get_smu_metrics_data(smu,
+                                              METRICS_CURR_FANPWM,
+                                              speed);
+       if (ret) {
+               dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+               return ret;
+       }
+
+       /* Convert the PMFW output which is in percent to pwm(255) based */
+       *speed = MIN(*speed * 255 / 100, 255);
+
+       return 0;
 }
 
 static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 07e3dc18c8b8..5c6c6ad011ca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1363,12 +1363,23 @@ static int smu_v13_0_7_populate_umd_state_clk(struct 
smu_context *smu)
 static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
                                         uint32_t *speed)
 {
+       int ret;
+
        if (!speed)
                return -EINVAL;
 
-       return smu_v13_0_7_get_smu_metrics_data(smu,
-                                               METRICS_CURR_FANPWM,
-                                               speed);
+       ret = smu_v13_0_7_get_smu_metrics_data(smu,
+                                              METRICS_CURR_FANPWM,
+                                              speed);
+       if (ret) {
+               dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+               return ret;
+       }
+
+       /* Convert the PMFW output which is in percent to pwm(255) based */
+       *speed = MIN(*speed * 255 / 100, 255);
+
+       return 0;
 }
 
 static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu,
commit 272b981416f8be0180c4d8066f90635fa7c1c501
Author: Evan Quan <evan.q...@amd.com>
Date:   Thu Dec 15 13:38:46 2022 +0800

    drm/amd/pm: bump SMU13.0.0 driver_if header to version 0x34
    
    To fit the latest PMFW and suppress the warning emerged on driver loading.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Acked-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0, 6.1

diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h 
b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index b76f0f7e4299..d6b964cf73bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -522,9 +522,9 @@ typedef enum  {
   TEMP_HOTSPOT_M,
   TEMP_MEM,
   TEMP_VR_GFX,
+  TEMP_VR_SOC,
   TEMP_VR_MEM0,
   TEMP_VR_MEM1,
-  TEMP_VR_SOC,
   TEMP_VR_U,
   TEMP_LIQUID0,
   TEMP_LIQUID1,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h 
b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index ea29ac6a80e6..e8c6febb8b64 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,6 +28,7 @@
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e3a80ac987df..e54b760b875b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -290,6 +290,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
                break;
        case IP_VERSION(13, 0, 0):
+               smu->smc_driver_if_version = 
SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0;
+               break;
        case IP_VERSION(13, 0, 10):
                smu->smc_driver_if_version = 
SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
                break;
commit afa6646b1c5d3affd541f76bd7476e4b835a9174
Author: Alex Deucher <alexander.deuc...@amd.com>
Date:   Fri Dec 16 11:42:20 2022 -0500

    drm/amdgpu: skip MES for S0ix as well since it's part of GFX
    
    It's also part of gfxoff.
    
    Cc: sta...@vger.kernel.org # 6.0, 6.1
    Reviewed-by: Mario Limonciello <mario.limoncie...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 64660a41d53c..afe6af9c0138 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3016,14 +3016,15 @@ static int amdgpu_device_ip_suspend_phase2(struct 
amdgpu_device *adev)
                        continue;
                }
 
-               /* skip suspend of gfx and psp for S0ix
+               /* skip suspend of gfx/mes and psp for S0ix
                 * gfx is in gfxoff state, so on resume it will exit gfxoff just
                 * like at runtime. PSP is also part of the always on hardware
                 * so no need to suspend it.
                 */
                if (adev->in_s0ix &&
                    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP 
||
-                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX 
||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
                        continue;
 
                /* XXX handle errors */
commit d118b18fb1da02b41df2da78cb2794b3638d89cd
Author: Arnd Bergmann <a...@arndb.de>
Date:   Thu Dec 15 17:36:31 2022 +0100

    drm/amd/pm: avoid large variable on kernel stack
    
    The activity_monitor_external[] array is too big to fit on the
    kernel stack, resulting in this warning with clang:
    
    drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu13/smu_v13_0_7_ppt.c:1438:12: 
error: stack frame size (1040) exceeds limit (1024) in 
'smu_v13_0_7_get_power_profile_mode' [-Werror,-Wframe-larger-than]
    
    Use dynamic allocation instead. It should also be possible to
    have single element here instead of the array, but this seems
    easier.
    
    v2: fix up argument to sizeof() (Alex)
    
    Fixes: 334682ae8151 ("drm/amd/pm: enable workload type change on 
smu_v13_0_7")
    Signed-off-by: Arnd Bergmann <a...@arndb.de>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index ab1c004606be..07e3dc18c8b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1440,7 +1440,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context 
*smu,
 
 static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char 
*buf)
 {
-       DpmActivityMonitorCoeffIntExternal_t 
activity_monitor_external[PP_SMC_POWER_PROFILE_COUNT];
+       DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
        uint32_t i, j, size = 0;
        int16_t workload_type = 0;
        int result = 0;
@@ -1448,6 +1448,12 @@ static int smu_v13_0_7_get_power_profile_mode(struct 
smu_context *smu, char *buf
        if (!buf)
                return -EINVAL;
 
+       activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
+                                           sizeof(*activity_monitor_external),
+                                           GFP_KERNEL);
+       if (!activity_monitor_external)
+               return -ENOMEM;
+
        size += sysfs_emit_at(buf, size, "                              ");
        for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
                size += sysfs_emit_at(buf, size, "%-14s%s", 
amdgpu_pp_profile_name[i],
@@ -1460,15 +1466,17 @@ static int smu_v13_0_7_get_power_profile_mode(struct 
smu_context *smu, char *buf
                workload_type = smu_cmn_to_asic_specific_index(smu,
                                                               
CMN2ASIC_MAPPING_WORKLOAD,
                                                               i);
-               if (workload_type < 0)
-                       return -EINVAL;
+               if (workload_type < 0) {
+                       result = -EINVAL;
+                       goto out;
+               }
 
                result = smu_cmn_update_table(smu,
                                          SMU_TABLE_ACTIVITY_MONITOR_COEFF, 
workload_type,
                                          (void 
*)(&activity_monitor_external[i]), false);
                if (result) {
                        dev_err(smu->adev->dev, "[%s] Failed to get activity 
monitor!", __func__);
-                       return result;
+                       goto out;
                }
        }
 
@@ -1496,7 +1504,10 @@ do {                                                     
                                                \
        PRINT_DPM_MONITOR(Fclk_BoosterFreq);
 #undef PRINT_DPM_MONITOR
 
-       return size;
+       result = size;
+out:
+       kfree(activity_monitor_external);
+       return result;
 }
 
 static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long 
*input, uint32_t size)
commit 1a799c4c190ea9f0e81028e3eb3037ed0ab17ff5
Author: Philip Yang <philip.y...@amd.com>
Date:   Tue Dec 13 00:50:03 2022 -0500

    drm/amdkfd: Fix double release compute pasid
    
    If kfd_process_device_init_vm returns failure after vm is converted to
    compute vm and vm->pasid set to compute pasid, KFD will not take
    pdd->drm_file reference. As a result, drm close file handler maybe
    called to release the compute pasid before KFD process destroy worker to
    release the same pasid and set vm->pasid to zero, this generates below
    WARNING backtrace and NULL pointer access.
    
    Add helper amdgpu_amdkfd_gpuvm_set_vm_pasid and call it at the last step
    of kfd_process_device_init_vm, to ensure vm pasid is the original pasid
    if acquiring vm failed or is the compute pasid with pdd->drm_file
    reference taken to avoid double release same pasid.
    
     amdgpu: Failed to create process VM object
     ida_free called for id=32770 which is not allocated.
     WARNING: CPU: 57 PID: 72542 at ../lib/idr.c:522 ida_free+0x96/0x140
     RIP: 0010:ida_free+0x96/0x140
     Call Trace:
      amdgpu_pasid_free_delayed+0xe1/0x2a0 [amdgpu]
      amdgpu_driver_postclose_kms+0x2d8/0x340 [amdgpu]
      drm_file_free.part.13+0x216/0x270 [drm]
      drm_close_helper.isra.14+0x60/0x70 [drm]
      drm_release+0x6e/0xf0 [drm]
      __fput+0xcc/0x280
      ____fput+0xe/0x20
      task_work_run+0x96/0xc0
      do_exit+0x3d0/0xc10
    
     BUG: kernel NULL pointer dereference, address: 0000000000000000
     RIP: 0010:ida_free+0x76/0x140
     Call Trace:
      amdgpu_pasid_free_delayed+0xe1/0x2a0 [amdgpu]
      amdgpu_driver_postclose_kms+0x2d8/0x340 [amdgpu]
      drm_file_free.part.13+0x216/0x270 [drm]
      drm_close_helper.isra.14+0x60/0x70 [drm]
      drm_release+0x6e/0xf0 [drm]
      __fput+0xcc/0x280
      ____fput+0xe/0x20
      task_work_run+0x96/0xc0
      do_exit+0x3d0/0xc10
    
    Signed-off-by: Philip Yang <philip.y...@amd.com>
    Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 589939631ed4..0040deaf8a83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -270,8 +270,10 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct 
amdgpu_device *adev, bool is_
        (&((struct amdgpu_fpriv *)                                      \
                ((struct drm_file *)(drm_priv))->driver_priv)->vm)
 
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+                                    struct file *filp, u32 pasid);
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
-                                       struct file *filp, u32 pasid,
+                                       struct file *filp,
                                        void **process_info,
                                        struct dma_fence **ef);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 0a854bb8b47e..b15091d8310d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1429,10 +1429,9 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct 
amdgpu_bo *bo)
        amdgpu_bo_unreserve(bo);
 }
 
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
-                                          struct file *filp, u32 pasid,
-                                          void **process_info,
-                                          struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+                                    struct file *filp, u32 pasid)
+
 {
        struct amdgpu_fpriv *drv_priv;
        struct amdgpu_vm *avm;
@@ -1443,10 +1442,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct 
amdgpu_device *adev,
                return ret;
        avm = &drv_priv->vm;
 
-       /* Already a compute VM? */
-       if (avm->process_info)
-               return -EINVAL;
-
        /* Free the original amdgpu allocated pasid,
         * will be replaced with kfd allocated pasid.
         */
@@ -1455,14 +1450,36 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct 
amdgpu_device *adev,
                amdgpu_vm_set_pasid(adev, avm, 0);
        }
 
-       /* Convert VM into a compute VM */
-       ret = amdgpu_vm_make_compute(adev, avm);
+       ret = amdgpu_vm_set_pasid(adev, avm, pasid);
        if (ret)
                return ret;
 
-       ret = amdgpu_vm_set_pasid(adev, avm, pasid);
+       return 0;
+}
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+                                          struct file *filp,
+                                          void **process_info,
+                                          struct dma_fence **ef)
+{
+       struct amdgpu_fpriv *drv_priv;
+       struct amdgpu_vm *avm;
+       int ret;
+
+       ret = amdgpu_file_to_fpriv(filp, &drv_priv);
        if (ret)
                return ret;
+       avm = &drv_priv->vm;
+
+       /* Already a compute VM? */
+       if (avm->process_info)
+               return -EINVAL;
+
+       /* Convert VM into a compute VM */
+       ret = amdgpu_vm_make_compute(adev, avm);
+       if (ret)
+               return ret;
+
        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(avm, process_info, ef);
        if (ret)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 6caa9dd57ff1..51b1683ac5c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1576,9 +1576,9 @@ int kfd_process_device_init_vm(struct kfd_process_device 
*pdd,
        p = pdd->process;
        dev = pdd->dev;
 
-       ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
-               dev->adev, drm_file, p->pasid,
-               &p->kgd_process_info, &p->ef);
+       ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
+                                                    &p->kgd_process_info,
+                                                    &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
@@ -1593,10 +1593,16 @@ int kfd_process_device_init_vm(struct 
kfd_process_device *pdd,
        if (ret)
                goto err_init_cwsr;
 
+       ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
+       if (ret)
+               goto err_set_pasid;
+
        pdd->drm_file = drm_file;
 
        return 0;
 
+err_set_pasid:
+       kfd_process_device_destroy_cwsr_dgpu(pdd);
 err_init_cwsr:
        kfd_process_device_destroy_ib_mem(pdd);
 err_reserve_ib_mem:
commit 29d48b87db64b6697ddad007548e51d032081c59
Author: Philip Yang <philip.y...@amd.com>
Date:   Wed Dec 14 10:15:17 2022 -0500

    drm/amdkfd: Fix kfd_process_device_init_vm error handling
    
    Should only destroy the ib_mem and let process cleanup worker to free
    the outstanding BOs. Reset the pointer in pdd->qpd structure, to avoid
    NULL pointer access in process destroy worker.
    
     BUG: kernel NULL pointer dereference, address: 0000000000000010
     Call Trace:
      amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel+0x46/0xb0 [amdgpu]
      kfd_process_device_destroy_cwsr_dgpu+0x40/0x70 [amdgpu]
      kfd_process_destroy_pdds+0x71/0x190 [amdgpu]
      kfd_process_wq_release+0x2a2/0x3b0 [amdgpu]
      process_one_work+0x2a1/0x600
      worker_thread+0x39/0x3d0
    
    Signed-off-by: Philip Yang <philip.y...@amd.com>
    Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a26257171ab7..6caa9dd57ff1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -689,13 +689,13 @@ void kfd_process_destroy_wq(void)
 }
 
 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
-                       struct kfd_process_device *pdd, void *kptr)
+                       struct kfd_process_device *pdd, void **kptr)
 {
        struct kfd_dev *dev = pdd->dev;
 
-       if (kptr) {
+       if (kptr && *kptr) {
                amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
-               kptr = NULL;
+               *kptr = NULL;
        }
 
        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, 
pdd->drm_priv);
@@ -795,7 +795,7 @@ static void kfd_process_device_destroy_ib_mem(struct 
kfd_process_device *pdd)
        if (!qpd->ib_kaddr || !qpd->ib_base)
                return;
 
-       kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
+       kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
 }
 
 struct kfd_process *kfd_create_process(struct file *filep)
@@ -1277,7 +1277,7 @@ static void kfd_process_device_destroy_cwsr_dgpu(struct 
kfd_process_device *pdd)
        if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
                return;
 
-       kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
+       kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
 }
 
 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
@@ -1598,8 +1598,8 @@ int kfd_process_device_init_vm(struct kfd_process_device 
*pdd,
        return 0;
 
 err_init_cwsr:
+       kfd_process_device_destroy_ib_mem(pdd);
 err_reserve_ib_mem:
-       kfd_process_device_free_bos(pdd);
        pdd->drm_priv = NULL;
 
        return ret;
commit 38624d2c972e7692ca59d0d3b97741a3313addb3
Merge: 5504eb164eec ad0fca2dceea
Author: Dave Airlie <airl...@redhat.com>
Date:   Tue Dec 20 15:43:06 2022 +1000

    Merge tag 'drm-intel-next-fixes-2022-12-15' of 
git://anongit.freedesktop.org/drm/drm-intel into drm-next
    
    - Documentation fixes (Matt, Miaoqian)
    - OA-perf related fix (Umesh)
    - VLV/CHV HDMI/DP audio fix (Ville)
    - Display DDI/Transcoder fix (Khaled)
    - Migrate fixes (Chris, Matt)
    
    Signed-off-by: Dave Airlie <airl...@redhat.com>
    
    From: Rodrigo Vivi <rodrigo.v...@intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/Y5uFYOJ/1jgf2...@intel.com

commit 5504eb164eecdcc1fcb7d7a3d05c29b3bbbcfa78
Merge: 66efff515a65 7a18e089eff0
Author: Dave Airlie <airl...@redhat.com>
Date:   Tue Dec 20 15:21:18 2022 +1000

    Merge tag 'amd-drm-fixes-6.2-2022-12-15' of 
https://gitlab.freedesktop.org/agd5f/linux into drm-next
    
    amd-drm-fixes-6.2-2022-12-15:
    
    amdgpu:
    - Spelling fix
    - BO pin fix
    - Properly handle polaris 10/11 overlap asics
    - GMC9 fix
    - SR-IOV suspend fix
    - DCN 3.1.4 fix
    - KFD userptr locking fix
    - SMU13.x fixes
    - GDS/GWS/OA handling fix
    - Reserved VMID handling fixes
    - FRU EEPROM fix
    - BO validation fixes
    
    Signed-off-by: Dave Airlie <airl...@redhat.com>
    From: Alex Deucher <alexander.deuc...@amd.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221215224936.6438-1-alexander.deuc...@amd.com

commit 7a18e089eff02f17eaee49fc18641f5d16a8284b
Author: Evan Quan <evan.q...@amd.com>
Date:   Mon Dec 5 15:33:31 2022 +0800

    drm/amd/pm: update SMU13.0.0 reported maximum shader clock
    
    Update the reported maximum shader clock to the value which can
    be guaranteed to be achieved on all cards. This is to align with
    Window setting.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index d689fcab963d..713fb6ad39f6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -543,6 +543,23 @@ static int smu_v13_0_0_set_default_dpm_table(struct 
smu_context *smu)
                                                     dpm_table);
                if (ret)
                        return ret;
+
+               /*
+                * Update the reported maximum shader clock to the value
+                * which can be guarded to be achieved on all cards. This
+                * is aligned with Window setting. And considering that value
+                * might be not the peak frequency the card can achieve, it
+                * is normal some real-time clock frequency can overtake this
+                * labelled maximum clock frequency(for example in pp_dpm_sclk
+                * sysfs output).
+                */
+               if (skutable->DriverReportedClocks.GameClockAc &&
+                   (dpm_table->dpm_levels[dpm_table->count - 1].value >
+                   skutable->DriverReportedClocks.GameClockAc)) {
+                       dpm_table->dpm_levels[dpm_table->count - 1].value =
+                               skutable->DriverReportedClocks.GameClockAc;
+                       dpm_table->max = 
skutable->DriverReportedClocks.GameClockAc;
+               }
        } else {
                dpm_table->count = 1;
                dpm_table->dpm_levels[0].value = 
smu->smu_table.boot_values.gfxclk / 100;
@@ -805,6 +822,57 @@ static int smu_v13_0_0_get_smu_metrics_data(struct 
smu_context *smu,
        return ret;
 }
 
+static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
+                                            enum smu_clk_type clk_type,
+                                            uint32_t *min,
+                                            uint32_t *max)
+{
+       struct smu_13_0_dpm_context *dpm_context =
+               smu->smu_dpm.dpm_context;
+       struct smu_13_0_dpm_table *dpm_table;
+
+       switch (clk_type) {
+       case SMU_MCLK:
+       case SMU_UCLK:
+               /* uclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.uclk_table;
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               /* gfxclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.gfx_table;
+               break;
+       case SMU_SOCCLK:
+               /* socclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.soc_table;
+               break;
+       case SMU_FCLK:
+               /* fclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.fclk_table;
+               break;
+       case SMU_VCLK:
+       case SMU_VCLK1:
+               /* vclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.vclk_table;
+               break;
+       case SMU_DCLK:
+       case SMU_DCLK1:
+               /* dclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.dclk_table;
+               break;
+       default:
+               dev_err(smu->adev->dev, "Unsupported clock type!\n");
+               return -EINVAL;
+       }
+
+       if (min)
+               *min = dpm_table->min;
+       if (max)
+               *max = dpm_table->max;
+
+       return 0;
+}
+
 static int smu_v13_0_0_read_sensor(struct smu_context *smu,
                                   enum amd_pp_sensors sensor,
                                   void *data,
@@ -1910,7 +1978,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = 
{
        .get_enabled_mask = smu_cmn_get_enabled_mask,
        .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
        .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
-       .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+       .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
        .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
        .read_sensor = smu_v13_0_0_read_sensor,
        .feature_is_enabled = smu_cmn_feature_is_enabled,
commit 32a7819ff8e25375c7515aaae5cfcb8c44a461b7
Author: Evan Quan <evan.q...@amd.com>
Date:   Mon Dec 5 14:53:34 2022 +0800

    drm/amd/pm: correct SMU13.0.0 pstate profiling clock settings
    
    Correct the pstate standard/peak profiling mode clock settings
    for SMU13.0.0.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index cc66828c7a84..d689fcab963d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1307,9 +1307,17 @@ static int smu_v13_0_0_populate_umd_state_clk(struct 
smu_context *smu)
                                &dpm_context->dpm_tables.fclk_table;
        struct smu_umd_pstate_table *pstate_table =
                                &smu->pstate_table;
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *pptable = table_context->driver_pptable;
+       DriverReportedClocks_t driver_clocks =
+                       pptable->SkuTable.DriverReportedClocks;
 
        pstate_table->gfxclk_pstate.min = gfx_table->min;
-       pstate_table->gfxclk_pstate.peak = gfx_table->max;
+       if (driver_clocks.GameClockAc &&
+           (driver_clocks.GameClockAc < gfx_table->max))
+               pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+       else
+               pstate_table->gfxclk_pstate.peak = gfx_table->max;
 
        pstate_table->uclk_pstate.min = mem_table->min;
        pstate_table->uclk_pstate.peak = mem_table->max;
@@ -1326,12 +1334,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct 
smu_context *smu)
        pstate_table->fclk_pstate.min = fclk_table->min;
        pstate_table->fclk_pstate.peak = fclk_table->max;
 
-       /*
-        * For now, just use the mininum clock frequency.
-        * TODO: update them when the real pstate settings available
-        */
-       pstate_table->gfxclk_pstate.standard = gfx_table->min;
-       pstate_table->uclk_pstate.standard = mem_table->min;
+       if (driver_clocks.BaseClockAc &&
+           driver_clocks.BaseClockAc < gfx_table->max)
+               pstate_table->gfxclk_pstate.standard = 
driver_clocks.BaseClockAc;
+       else
+               pstate_table->gfxclk_pstate.standard = gfx_table->max;
+       pstate_table->uclk_pstate.standard = mem_table->max;
        pstate_table->socclk_pstate.standard = soc_table->min;
        pstate_table->vclk_pstate.standard = vclk_table->min;
        pstate_table->dclk_pstate.standard = dclk_table->min;
commit 62b9f835a6c60171845642afec4ce4b44865f10f
Author: Evan Quan <evan.q...@amd.com>
Date:   Fri Dec 2 14:03:45 2022 +0800

    drm/amd/pm: enable GPO dynamic control support for SMU13.0.7
    
    To better support UMD pstate profiling, the GPO feature needs
    to be switched on/off accordingly.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c270f94a1b86..ab1c004606be 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -123,6 +123,7 @@ static struct cmn2asic_msg_mapping 
smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(SetMGpuFanBoostLimitRpm,        
PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
        MSG_MAP(DFCstateControl,                
PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
        MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                
       0),
+       MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,          
 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1690,6 +1691,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = 
{
        .mode1_reset = smu_v13_0_mode1_reset,
        .set_mp1_state = smu_v13_0_7_set_mp1_state,
        .set_df_cstate = smu_v13_0_7_set_df_cstate,
+       .gpo_control = smu_v13_0_gpo_control,
 };
 
 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
commit 1794f6a9535bb5234c2b747d1bc6dad03249245a
Author: Evan Quan <evan.q...@amd.com>
Date:   Fri Dec 2 13:56:35 2022 +0800

    drm/amd/pm: enable GPO dynamic control support for SMU13.0.0
    
    To better support UMD pstate profiling, the GPO feature needs
    to be switched on/off accordingly.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h 
b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index a4e3425b1027..4180c71d930f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -241,7 +241,8 @@
        __SMU_DUMMY_MAP(GetGfxOffEntryCount),            \
        __SMU_DUMMY_MAP(LogGfxOffResidency),                    \
        __SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired),           \
-       __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel),
+       __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
+       __SMU_DUMMY_MAP(AllowGpo),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h 
b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 865d6358918d..ea29ac6a80e6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -272,6 +272,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context 
*smu);
 
 int smu_v13_0_run_btc(struct smu_context *smu);
 
+int smu_v13_0_gpo_control(struct smu_context *smu,
+                         bool enablement);
+
 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
                                 bool enablement);
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index f5e90e0a99df..e3a80ac987df 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2180,6 +2180,21 @@ int smu_v13_0_run_btc(struct smu_context *smu)
        return res;
 }
 
+int smu_v13_0_gpo_control(struct smu_context *smu,
+                         bool enablement)
+{
+       int res;
+
+       res = smu_cmn_send_smc_msg_with_param(smu,
+                                             SMU_MSG_AllowGpo,
+                                             enablement ? 1 : 0,
+                                             NULL);
+       if (res)
+               dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+       return res;
+}
+
 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
                                 bool enablement)
 {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 21d89c3302f1..cc66828c7a84 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -144,6 +144,7 @@ static struct cmn2asic_msg_mapping 
smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(SetNumBadMemoryPagesRetired,    
PPSMC_MSG_SetNumBadMemoryPagesRetired,   0),
        MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
                            PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,  
 0),
+       MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,          
 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1949,6 +1950,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = 
{
        .set_df_cstate = smu_v13_0_0_set_df_cstate,
        .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
        .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
+       .gpo_control = smu_v13_0_gpo_control,
 };
 
 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
commit 47722220660cfb935e27e62d385959ecc296cddb
Author: Christian König <christian.koe...@amd.com>
Date:   Mon Dec 12 17:31:57 2022 +0100

    drm/amdgpu: revert "generally allow over-commit during BO allocation"
    
    This reverts commit f9d00a4a8dc8fff951c97b3213f90d6bc7a72175.
    
    This causes problems for KFD because when we overcommit we accidentally
    bind the BO to GTT for moving it into VRAM. We also need to make sure
    that this is done only as fallback after trying to evict first.
    
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 62e98f1ad770..a0780a4e3e61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -113,7 +113,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, 
unsigned long size,
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
        bp.flags = flags;
-       bp.domain = initial_domain | AMDGPU_GEM_DOMAIN_CPU;
+       bp.domain = initial_domain;
        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        r = amdgpu_bo_create_user(adev, &bp, &ubo);
@@ -332,10 +332,20 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void 
*data,
        }
 
        initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-                                    initial_domain, flags, ttm_bo_type_device,
-                                    resv, &gobj);
+                                    initial_domain,
+                                    flags, ttm_bo_type_device, resv, &gobj);
        if (r && r != -ERESTARTSYS) {
+               if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                       flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                       goto retry;
+               }
+
+               if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+                       initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+                       goto retry;
+               }
                DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, 
%d)\n",
                                size, initial_domain, args->in.alignment, r);
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0f6385941480..4e684c2afc70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -580,7 +580,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
                bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 
        bo->tbo.bdev = &adev->mman.bdev;
-       amdgpu_bo_placement_from_domain(bo, bp->domain);
+       if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
+                         AMDGPU_GEM_DOMAIN_GDS))
+               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+       else
+               amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
                bo->tbo.priority = 1;
 
commit 3273f11675ef11959d25a56df3279f712bcd41b7
Author: Luben Tuikov <luben.tui...@amd.com>
Date:   Wed Dec 14 03:56:03 2022 -0500

    drm/amdgpu: Remove unnecessary domain argument
    
    Remove the "domain" argument to amdgpu_bo_create_kernel_at() since this
    function takes an "offset" argument which is the offset off of VRAM, and as
    such allocation always takes place in VRAM. Thus, the "domain" argument is
    unnecessary.
    
    Cc: Alex Deucher <alexander.deuc...@amd.com>
    Cc: Christian König <christian.koe...@amd.com>
    Cc: AMD Graphics <amd-...@lists.freedesktop.org>
    Signed-off-by: Luben Tuikov <luben.tui...@amd.com>
    Reviewed-by: Christian König <christian.koe...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 26c6e9b2d460..0f6385941480 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -346,17 +346,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
  * @adev: amdgpu device object
  * @offset: offset of the BO
  * @size: size of the BO
- * @domain: where to place it
  * @bo_ptr:  used to initialize BOs in structures
  * @cpu_addr: optional CPU address mapping
  *
- * Creates a kernel BO at a specific offset in the address space of the domain.
+ * Creates a kernel BO at a specific offset in VRAM.
  *
  * Returns:
  * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-                              uint64_t offset, uint64_t size, uint32_t domain,
+                              uint64_t offset, uint64_t size,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr)
 {
        struct ttm_operation_ctx ctx = { false, false };
@@ -366,8 +365,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
        offset &= PAGE_MASK;
        size = ALIGN(size, PAGE_SIZE);
 
-       r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
-                                     NULL, cpu_addr);
+       r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
+                                     cpu_addr);
        if (r)
                return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 147b79c10cbb..93207badf83f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -284,7 +284,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr);
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-                              uint64_t offset, uint64_t size, uint32_t domain,
+                              uint64_t offset, uint64_t size,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr);
 int amdgpu_bo_create_user(struct amdgpu_device *adev,
                          struct amdgpu_bo_param *bp,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f0e4c7309438..55e0284b2bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1576,7 +1576,6 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct 
amdgpu_device *adev)
        return amdgpu_bo_create_kernel_at(adev,
                                          adev->mman.fw_vram_usage_start_offset,
                                          adev->mman.fw_vram_usage_size,
-                                         AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->mman.fw_vram_usage_reserved_bo,
                                          &adev->mman.fw_vram_usage_va);
 }
@@ -1602,7 +1601,6 @@ static int amdgpu_ttm_drv_reserve_vram_init(struct 
amdgpu_device *adev)
        return amdgpu_bo_create_kernel_at(adev,
                                          
adev->mman.drv_vram_usage_start_offset,
                                          adev->mman.drv_vram_usage_size,
-                                         AMDGPU_GEM_DOMAIN_VRAM,
                                          
&adev->mman.drv_vram_usage_reserved_bo,
                                          &adev->mman.drv_vram_usage_va);
 }
@@ -1683,7 +1681,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device 
*adev)
                ret = amdgpu_bo_create_kernel_at(adev,
                                         ctx->c2p_train_data_offset,
                                         ctx->train_data_size,
-                                        AMDGPU_GEM_DOMAIN_VRAM,
                                         &ctx->c2p_bo,
                                         NULL);
                if (ret) {
@@ -1697,7 +1694,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device 
*adev)
        ret = amdgpu_bo_create_kernel_at(adev,
                                adev->gmc.real_vram_size - 
adev->mman.discovery_tmr_size,
                                adev->mman.discovery_tmr_size,
-                               AMDGPU_GEM_DOMAIN_VRAM,
                                &adev->mman.discovery_memory,
                                NULL);
        if (ret) {
@@ -1798,21 +1794,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         * avoid display artifacts while transitioning between pre-OS
         * and driver.  */
        r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.stolen_vga_memory,
                                       NULL);
        if (r)
                return r;
        r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
                                       adev->mman.stolen_extended_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.stolen_extended_memory,
                                       NULL);
        if (r)
                return r;
        r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
                                       adev->mman.stolen_reserved_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.stolen_reserved_memory,
                                       NULL);
        if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 15544f262ec1..2994b9db196f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -395,7 +395,6 @@ static void amdgpu_virt_ras_reserve_bps(struct 
amdgpu_device *adev)
                 */
                if (amdgpu_bo_create_kernel_at(adev, bp << 
AMDGPU_GPU_PAGE_SHIFT,
                                               AMDGPU_GPU_PAGE_SIZE,
-                                              AMDGPU_GEM_DOMAIN_VRAM,
                                               &bo, NULL))
                        DRM_DEBUG("RAS WARN: reserve vram for retired page %llx 
fail\n", bp);
 
commit 7554886daa31eacc8e7fac9e15bbce67d10b8f1f
Author: Luben Tuikov <luben.tui...@amd.com>
Date:   Sat Dec 10 02:51:19 2022 -0500

    drm/amdgpu: Fix size validation for non-exclusive domains (v4)
    
    Fix amdgpu_bo_validate_size() to check whether the TTM domain manager for 
the
    requested memory exists, else we get a kernel oops when dereferencing "man".
    
    v2: Make the patch standalone, i.e. not dependent on local patches.
    v3: Preserve old behaviour and just check that the manager pointer is not
        NULL.
    v4: Complain if GTT domain requested and it is uninitialized--most likely a
        bug.
    
    Cc: Alex Deucher <alexander.deuc...@amd.com>
    Cc: Christian König <christian.koe...@amd.com>
    Cc: AMD Graphics <amd-...@lists.freedesktop.org>
    Signed-off-by: Luben Tuikov <luben.tui...@amd.com>
    Reviewed-by: Christian König <christian.koe...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3393c1a6a0ff..26c6e9b2d460 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -448,27 +448,24 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device 
*adev,
 
        /*
         * If GTT is part of requested domains the check must succeed to
-        * allow fall back to GTT
+        * allow fall back to GTT.
         */
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
-               if (size < man->size)
+               if (man && size < man->size)
                        return true;
-               else
-                       goto fail;
-       }
-
-       if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+               else if (!man)
+                       WARN_ON_ONCE("GTT domain requested but GTT mem manager 
uninitialized");
+               goto fail;
+       } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-               if (size < man->size)
+               if (man && size < man->size)
                        return true;
-               else
-                       goto fail;
+               goto fail;
        }
 
-
        /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
        return true;
 
commit 28afcb0ad54c858d0f426b340e88e0277a375597
Author: Luben Tuikov <luben.tui...@amd.com>
Date:   Mon Dec 12 12:04:42 2022 -0500

    drm/amdgpu: Check if fru_addr is not NULL (v2)
    
    Always check if fru_addr is not NULL. This commit also fixes a "smatch"
    warning.
    
    v2: Add a Fixes tag.
    
    Cc: Alex Deucher <alexander.deuc...@amd.com>
    Cc: Dan Carpenter <erro...@gmail.com>
    Cc: kernel test robot <l...@intel.com>
    Cc: AMD Graphics <amd-...@lists.freedesktop.org>
    Fixes: afbe5d1e4bd7c7 ("drm/amdgpu: Bug-fix: Reading I2C FRU data on newer 
ASICs")
    Signed-off-by: Luben Tuikov <luben.tui...@amd.com>
    Reviewed-by: Kent Russell <kent.russ...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 2c38ac7bc643..4620c4712ce3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -64,7 +64,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device 
*adev, u32 *fru_addr)
                            sizeof(atom_ctx->vbios_version)) ||
                    strnstr(atom_ctx->vbios_version, "D163",
                            sizeof(atom_ctx->vbios_version))) {
-                       *fru_addr = FRU_EEPROM_MADDR_6;
+                       if (fru_addr)
+                               *fru_addr = FRU_EEPROM_MADDR_6;
                        return true;
                } else {
                        return false;
@@ -83,7 +84,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device 
*adev, u32 *fru_addr)
                                    sizeof(atom_ctx->vbios_version))) {
                                return false;
                        } else {
-                               *fru_addr = FRU_EEPROM_MADDR_6;
+                               if (fru_addr)
+                                       *fru_addr = FRU_EEPROM_MADDR_6;
                                return true;
                        }
                } else {
commit ad0fca2dceeab8fdd8e1135f4b4ef2dc46c2ead9
Author: Matthew Auld <matthew.a...@intel.com>
Date:   Mon Dec 12 17:19:58 2022 +0000

    drm/i915/ttm: consider CCS for backup objects
    
    It seems we can have one or more framebuffers that are still pinned when
    suspending lmem, in such a case we end up creating a shmem backup
    object, instead of evicting the object directly, but this will skip
    copying the CCS aux state, since we don't allocate the extra storage for
    the CCS pages as part of the ttm_tt construction. Since we can already
    deal with pinned objects just fine, it doesn't seem too nasty to just
    extend to support dealing with the CCS aux state, if the object is a
    pinned framebuffer. This fixes display corruption (like in gnome-shell)
    seen on DG2 when returning from suspend.
    
    Fixes: da0595ae91da ("drm/i915/migrate: Evict and restore the flatccs 
capable lmem obj")
    Signed-off-by: Matthew Auld <matthew.a...@intel.com>
    Cc: Ville Syrjälä <ville.syrj...@linux.intel.com>
    Cc: Nirmoy Das <nirmoy....@intel.com>
    Cc: Andrzej Hajda <andrzej.ha...@intel.com>
    Cc: Shuicheng Lin <shuicheng....@intel.com>
    Cc: <sta...@vger.kernel.org> # v5.19+
    Tested-by: Nirmoy Das <nirmoy....@intel.com>
    Reviewed-by: Nirmoy Das <nirmoy....@intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221212171958.82593-2-matthew.a...@intel.com
    (cherry picked from commit 95df9cc24bee8a09d39c62bcef4319b984814e18)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c 
b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 733696057761..1a0886b8aaa1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -785,6 +785,9 @@ bool i915_gem_object_needs_ccs_pages(struct 
drm_i915_gem_object *obj)
        if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
                return false;
 
+       if (obj->flags & I915_BO_ALLOC_CCS_AUX)
+               return true;
+
        for (i = 0; i < obj->mm.n_placements; i++) {
                /* Compression is not allowed for the objects with smem 
placement */
                if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index d0d6772e6f36..ab4c2f90a564 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -327,16 +327,18 @@ struct drm_i915_gem_object {
  * dealing with userspace objects the CPU fault handler is free to ignore this.
  */
 #define I915_BO_ALLOC_GPU_ONLY   BIT(6)
+#define I915_BO_ALLOC_CCS_AUX    BIT(7)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
                             I915_BO_ALLOC_VOLATILE | \
                             I915_BO_ALLOC_CPU_CLEAR | \
                             I915_BO_ALLOC_USER | \
                             I915_BO_ALLOC_PM_VOLATILE | \
                             I915_BO_ALLOC_PM_EARLY | \
-                            I915_BO_ALLOC_GPU_ONLY)
-#define I915_BO_READONLY          BIT(7)
-#define I915_TILING_QUIRK_BIT     8 /* unknown swizzling; do not release! */
-#define I915_BO_PROTECTED         BIT(9)
+                            I915_BO_ALLOC_GPU_ONLY | \
+                            I915_BO_ALLOC_CCS_AUX)
+#define I915_BO_READONLY          BIT(8)
+#define I915_TILING_QUIRK_BIT     9 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED         BIT(10)
        /**
         * @mem_flags - Mutable placement-related flags
         *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c 
b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 07e49f22f2de..7e67742bc65e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -50,6 +50,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region 
*apply,
                container_of(bo->bdev, typeof(*i915), bdev);
        struct drm_i915_gem_object *backup;
        struct ttm_operation_ctx ctx = {};
+       unsigned int flags;
        int err = 0;
 
        if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
@@ -65,7 +66,22 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region 
*apply,
        if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
                return 0;
 
-       backup = i915_gem_object_create_shmem(i915, obj->base.size);
+       /*
+        * It seems that we might have some framebuffers still pinned at this
+        * stage, but for such objects we might also need to deal with the CCS
+        * aux state. Make sure we force the save/restore of the CCS state,
+        * otherwise we might observe display corruption, when returning from
+        * suspend.
+        */
+       flags = 0;
+       if (i915_gem_object_needs_ccs_pages(obj)) {
+               WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
+               WARN_ON_ONCE(!pm_apply->allow_gpu);
+
+               flags = I915_BO_ALLOC_CCS_AUX;
+       }
+       backup = 
i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+                                              obj->base.size, 0, flags);
        if (IS_ERR(backup))
                return PTR_ERR(backup);
 
commit 952d19190c6d482ec725f22e8bc8646bc0189d41
Author: Matthew Auld <matthew.a...@intel.com>
Date:   Mon Dec 12 17:19:57 2022 +0000

    drm/i915/migrate: fix corner case in CCS aux copying
    
    In the case of lmem -> lmem transfers, which is currently only possible
    with small-bar systems, we need to ensure we copy the CCS aux state
    as-is, rather than nuke it. This should fix some nasty display
    corruption sometimes seen on DG2 small-bar systems, when also using
    DG2_RC_CCS_CC for the surface.
    
    Fixes: e3afc690188b ("drm/i915/display: consider DG2_RC_CCS_CC when 
migrating buffers")
    Signed-off-by: Matthew Auld <matthew.a...@intel.com>
    Cc: Ville Syrjälä <ville.syrj...@linux.intel.com>
    Cc: Nirmoy Das <nirmoy....@intel.com>
    Cc: Andrzej Hajda <andrzej.ha...@intel.com>
    Cc: Shuicheng Lin <shuicheng....@intel.com>
    Reviewed-by: Nirmoy Das <nirmoy....@intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221212171958.82593-1-matthew.a...@intel.com
    (cherry picked from commit b29d26fbcb862526d5047caec82878be2eb75c0f)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c 
b/drivers/gpu/drm/i915/gt/intel_migrate.c
index b783f6f740c8..5fb74e71f27b 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -837,14 +837,35 @@ intel_context_migrate_copy(struct intel_context *ce,
                        if (err)
                                goto out_rq;
 
-                       /*
-                        * While we can't always restore/manage the CCS state,
-                        * we still need to ensure we don't leak the CCS state
-                        * from the previous user, so make sure we overwrite it
-                        * with something.
-                        */
-                       err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
-                                           dst_offset, DIRECT_ACCESS, len);
+                       if (src_is_lmem) {
+                               /*
+                                * If the src is already in lmem, then we must
+                                * be doing an lmem -> lmem transfer, and so
+                                * should be safe to directly copy the CCS
+                                * state. In this case we have either
+                                * initialised the CCS aux state when first
+                                * clearing the pages (since it is already
+                                * allocated in lmem), or the user has
+                                * potentially populated it, in which case we
+                                * need to copy the CCS state as-is.
+                                */
+                               err = emit_copy_ccs(rq,
+                                                   dst_offset, INDIRECT_ACCESS,
+                                                   src_offset, INDIRECT_ACCESS,
+                                                   len);
+                       } else {
+                               /*
+                                * While we can't always restore/manage the CCS
+                                * state, we still need to ensure we don't leak
+                                * the CCS state from the previous user, so make
+                                * sure we overwrite it with something.
+                                */
+                               err = emit_copy_ccs(rq,
+                                                   dst_offset, INDIRECT_ACCESS,
+                                                   dst_offset, DIRECT_ACCESS,
+                                                   len);
+                       }
+
                        if (err)
                                goto out_rq;
 
commit e44a0fe630c58b0a87d8281f5c1077a3479e5fce
Author: Christian König <christian.koe...@amd.com>
Date:   Fri Nov 25 17:04:25 2022 +0100

    drm/amdgpu: rework reserved VMID handling
    
    Instead of reserving a VMID for a single process allow that many
    processes use the reserved ID. This allows for proper isolation
    between the processes.
    
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 6949dfec75d5..fcb711a11a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -278,12 +278,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
+       struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        bool needs_flush = vm->use_cpu_for_update;
        uint64_t updates = amdgpu_vm_tlb_seq(vm);
        int r;
 
-       *id = vm->reserved_vmid[vmhub];
+       *id = id_mgr->reserved;
        if ((*id)->owner != vm->immediate.fence_context ||
            !amdgpu_vmid_compatible(*id, job) ||
            (*id)->flushed_updates < updates ||
@@ -462,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
 {
-       struct amdgpu_vmid_mgr *id_mgr;
-       struct amdgpu_vmid *idle;
-       int r = 0;
+       struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
-       id_mgr = &adev->vm_manager.id_mgr[vmhub];
        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub])
                goto unlock;
-       if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-           AMDGPU_VM_MAX_RESERVED_VMID) {
-               DRM_ERROR("Over limitation of reserved vmid\n");
-               atomic_dec(&id_mgr->reserved_vmid_num);
-               r = -EINVAL;
-               goto unlock;
+
+       ++id_mgr->reserved_use_count;
+       if (!id_mgr->reserved) {
+               struct amdgpu_vmid *id;
+
+               id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+                                     list);
+               /* Remove from normal round robin handling */
+               list_del_init(&id->list);
+               id_mgr->reserved = id;
        }
-       /* Select the first entry VMID */
-       idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
-       list_del_init(&idle->list);
-       vm->reserved_vmid[vmhub] = idle;
-       mutex_unlock(&id_mgr->lock);
+       vm->reserved_vmid[vmhub] = true;
 
-       return 0;
 unlock:
        mutex_unlock(&id_mgr->lock);
-       return r;
+       return 0;
 }
 
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
@@ -496,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
        mutex_lock(&id_mgr->lock);
-       if (vm->reserved_vmid[vmhub]) {
-               list_add(&vm->reserved_vmid[vmhub]->list,
-                       &id_mgr->ids_lru);
-               vm->reserved_vmid[vmhub] = NULL;
-               atomic_dec(&id_mgr->reserved_vmid_num);
+       if (vm->reserved_vmid[vmhub] &&
+           !--id_mgr->reserved_use_count) {
+               /* give the reserved ID back to normal round robin */
+               list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
        }
+       vm->reserved_vmid[vmhub] = false;
        mutex_unlock(&id_mgr->lock);
 }
 
@@ -568,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
-               atomic_set(&id_mgr->reserved_vmid_num, 0);
+               id_mgr->reserved_use_count = 0;
 
                /* manage only VMIDs not used by KFD */
                id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 57efe61dceed..d1cc09b45da4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,7 +67,8 @@ struct amdgpu_vmid_mgr {
        unsigned                num_ids;
        struct list_head        ids_lru;
        struct amdgpu_vmid      ids[AMDGPU_NUM_VMID];
-       atomic_t                reserved_vmid_num;
+       struct amdgpu_vmid      *reserved;
+       unsigned int            reserved_use_count;
 };
 
 int amdgpu_pasid_alloc(unsigned int bits);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 6546e786bf00..094bb4807303 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -119,9 +119,6 @@ struct amdgpu_bo_vm;
 /* Reserve 2MB at top/bottom of address space for kernel use */
 #define AMDGPU_VA_RESERVED_SIZE                        (2ULL << 20)
 
-/* max vmids dedicated for process */
-#define AMDGPU_VM_MAX_RESERVED_VMID    1
-
 /* See vm_update_mode */
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
@@ -298,8 +295,7 @@ struct amdgpu_vm {
        struct dma_fence        *last_unlocked;
 
        unsigned int            pasid;
-       /* dedicated to vm */
-       struct amdgpu_vmid      *reserved_vmid[AMDGPU_MAX_VMHUBS];
+       bool                    reserved_vmid[AMDGPU_MAX_VMHUBS];
 
        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool                                    use_cpu_for_update;
commit 053499f7b45dc56758240615569b349fe9e2fc8d
Author: Christian König <christian.koe...@amd.com>
Date:   Fri Nov 25 16:45:09 2022 +0100

    drm/amdgpu: stop waiting for the VM during unreserve
    
    This is completely pointless since the VMID always stays allocated until
    the VM is idle.
    
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a05cce3f3170..dc379dc22c77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2368,7 +2368,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
        union drm_amdgpu_vm *args = data;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
-       long timeout = msecs_to_jiffies(2000);
        int r;
 
        switch (args->in.op) {
@@ -2380,21 +2379,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
                        return r;
                break;
        case AMDGPU_VM_OP_UNRESERVE_VMID:
-               if (amdgpu_sriov_runtime(adev))
-                       timeout = 8 * timeout;
-
-               /* Wait vm idle to make sure the vmid set in SPM_VMID is
-                * not referenced anymore.
-                */
-               r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
-               if (r)
-                       return r;
-
-               r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
-               if (r < 0)
-                       return r;
-
-               amdgpu_bo_unreserve(fpriv->vm.root.bo);
                amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
                break;
        default:
commit 5f3c40e9e2460c42f5bf6c51b1e393d7159241c3
Author: Christian König <christian.koe...@amd.com>
Date:   Fri Nov 25 16:42:45 2022 +0100

    drm/amdgpu: cleanup SPM support a bit
    
    This should probably not access job->vm and also emit the SPM switch
    under the conditional execute.
    
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 01878145a586..6949dfec75d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -315,6 +315,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                return r;
 
        job->vm_needs_flush = needs_flush;
+       job->spm_update_needed = true;
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 02e85b040baf..52f2e313ea17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -54,6 +54,7 @@ struct amdgpu_job {
        uint32_t                preemption_status;
        bool                    vm_needs_flush;
        bool                    gds_switch_needed;
+       bool                    spm_update_needed;
        uint64_t                vm_pd_addr;
        unsigned                vmid;
        unsigned                pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 245c66ea10e7..a05cce3f3170 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -519,22 +519,20 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct 
amdgpu_job *job,
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
+       bool spm_update_needed = job->spm_update_needed;
        bool gds_switch_needed = ring->funcs->emit_gds_switch &&
                job->gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush;
        struct dma_fence *fence = NULL;
        bool pasid_mapping_needed = false;
        unsigned patch_offset = 0;
-       bool update_spm_vmid_needed = (job->vm && 
(job->vm->reserved_vmid[vmhub] != NULL));
        int r;
 
-       if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
-
        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
                vm_flush_needed = true;
                pasid_mapping_needed = true;
+               spm_update_needed = true;
        }
 
        mutex_lock(&id_mgr->lock);
@@ -567,6 +565,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct 
amdgpu_job *job,
        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
+       if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+               adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
        if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
            gds_switch_needed) {
                amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
commit 56b0989e2939811c11ed9c449ff84cf85878ffe3
Author: Christian König <christian.koe...@amd.com>
Date:   Fri Nov 25 16:04:25 2022 +0100

    drm/amdgpu: fix GDS/GWS/OA switch handling
    
    Bas pointed out that this isn't working as expected and could cause
    crashes. Fix the handling by storing the marker that a switch is needed
    inside the job instead.
    
    Reported-by: Bas Nieuwenhuizen <b...@basnieuwenhuizen.nl>
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 2a9a2593dc18..01878145a586 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -165,6 +165,26 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                atomic_read(&adev->gpu_reset_counter);
 }
 
+/* Check if we need to switch to another set of resources */
+static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
+                                         struct amdgpu_job *job)
+{
+       return id->gds_base != job->gds_base ||
+               id->gds_size != job->gds_size ||
+               id->gws_base != job->gws_base ||
+               id->gws_size != job->gws_size ||
+               id->oa_base != job->oa_base ||
+               id->oa_size != job->oa_size;
+}
+
+/* Check if the id is compatible with the job */
+static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
+                                  struct amdgpu_job *job)
+{
+       return  id->pd_gpu_addr == job->vm_pd_addr &&
+               !amdgpu_vmid_gds_switch_needed(id, job);
+}
+
 /**
  * amdgpu_vmid_grab_idle - grab idle VMID
  *
@@ -265,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 
        *id = vm->reserved_vmid[vmhub];
        if ((*id)->owner != vm->immediate.fence_context ||
-           (*id)->pd_gpu_addr != job->vm_pd_addr ||
+           !amdgpu_vmid_compatible(*id, job) ||
            (*id)->flushed_updates < updates ||
            !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
@@ -294,7 +314,6 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
        if (r)
                return r;
 
-       (*id)->flushed_updates = updates;
        job->vm_needs_flush = needs_flush;
        return 0;
 }
@@ -333,7 +352,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                if ((*id)->owner != vm->immediate.fence_context)
                        continue;
 
-               if ((*id)->pd_gpu_addr != job->vm_pd_addr)
+               if (!amdgpu_vmid_compatible(*id, job))
                        continue;
 
                if (!(*id)->last_flush ||
@@ -355,7 +374,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                if (r)
                        return r;
 
-               (*id)->flushed_updates = updates;
                job->vm_needs_flush |= needs_flush;
                return 0;
        }
@@ -408,22 +426,30 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct 
amdgpu_ring *ring,
                        if (r)
                                goto error;
 
-                       id->flushed_updates = amdgpu_vm_tlb_seq(vm);
                        job->vm_needs_flush = true;
                }
 
                list_move_tail(&id->list, &id_mgr->ids_lru);
        }
 
-       id->pd_gpu_addr = job->vm_pd_addr;
-       id->owner = vm->immediate.fence_context;
-
+       job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
        if (job->vm_needs_flush) {
+               id->flushed_updates = amdgpu_vm_tlb_seq(vm);
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
+
+       id->gds_base = job->gds_base;
+       id->gds_size = job->gds_size;
+       id->gws_base = job->gws_base;
+       id->gws_size = job->gws_size;
+       id->oa_base = job->oa_base;
+       id->oa_size = job->oa_size;
+       id->pd_gpu_addr = job->vm_pd_addr;
+       id->owner = vm->immediate.fence_context;
+
        trace_amdgpu_vm_grab_id(vm, ring, job);
 
 error:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index a372802ea4e0..02e85b040baf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -53,6 +53,7 @@ struct amdgpu_job {
        uint32_t                preamble_status;
        uint32_t                preemption_status;
        bool                    vm_needs_flush;
+       bool                    gds_switch_needed;
        uint64_t                vm_pd_addr;
        unsigned                vmid;
        unsigned                pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c05cff979004..245c66ea10e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -484,25 +484,20 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring 
*ring,
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-       struct amdgpu_vmid *id;
-       bool gds_switch_needed;
-       bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
 
        if (job->vmid == 0)
                return false;
-       id = &id_mgr->ids[job->vmid];
-       gds_switch_needed = ring->funcs->emit_gds_switch && (
-               id->gds_base != job->gds_base ||
-               id->gds_size != job->gds_size ||
-               id->gws_base != job->gws_base ||
-               id->gws_size != job->gws_size ||
-               id->oa_base != job->oa_base ||
-               id->oa_size != job->oa_size);
-
-       if (amdgpu_vmid_had_gpu_reset(adev, id))
+
+       if (job->vm_needs_flush || ring->has_compute_vm_bug)
+               return true;
+
+       if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
                return true;
 
-       return vm_flush_needed || gds_switch_needed;
+       if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
+               return true;
+
+       return false;
 }
 
 /**
@@ -524,13 +519,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct 
amdgpu_job *job,
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
-       bool gds_switch_needed = ring->funcs->emit_gds_switch && (
-               id->gds_base != job->gds_base ||
-               id->gds_size != job->gds_size ||
-               id->gws_base != job->gws_base ||
-               id->gws_size != job->gws_size ||
-               id->oa_base != job->oa_base ||
-               id->oa_size != job->oa_size);
+       bool gds_switch_needed = ring->funcs->emit_gds_switch &&
+               job->gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush;
        struct dma_fence *fence = NULL;
        bool pasid_mapping_needed = false;
@@ -577,6 +567,14 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct 
amdgpu_job *job,
        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
+       if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+           gds_switch_needed) {
+               amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
+                                           job->gds_size, job->gws_base,
+                                           job->gws_size, job->oa_base,
+                                           job->oa_size);
+       }
+
        if (vm_flush_needed || pasid_mapping_needed) {
                r = amdgpu_fence_emit(ring, &fence, NULL, 0);
                if (r)
@@ -601,20 +599,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct 
amdgpu_job *job,
        }
        dma_fence_put(fence);
 
-       if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
-           gds_switch_needed) {
-               id->gds_base = job->gds_base;
-               id->gds_size = job->gds_size;
-               id->gws_base = job->gws_base;
-               id->gws_size = job->gws_size;
-               id->oa_base = job->oa_base;
-               id->oa_size = job->oa_size;
-               amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
-                                           job->gds_size, job->gws_base,
-                                           job->gws_size, job->oa_base,
-                                           job->oa_size);
-       }
-
        if (ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
commit e0607c10ebf551a654c3577fc74b4bf5533e1cea
Author: Evan Quan <evan.q...@amd.com>
Date:   Fri Dec 9 16:09:58 2022 +0800

    drm/amd/pm: add missing SMU13.0.7 mm_dpm feature mapping
    
    Without this, the pp_dpm_vclk and pp_dpm_dclk outputs do not report
    correct data.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c3c9ef523e59..c270f94a1b86 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -189,6 +189,8 @@ static struct cmn2asic_mapping 
smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(MEM_TEMP_READ),
        FEA_MAP(ATHUB_MMHUB_PG),
        FEA_MAP(SOC_PCC),
+       [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
commit 592cd24a08763975c75be850a7d4e461bfd353bf
Author: Evan Quan <evan.q...@amd.com>
Date:   Fri Dec 9 16:05:12 2022 +0800

    drm/amd/pm: add missing SMU13.0.0 mm_dpm feature mapping
    
    Without this, the pp_dpm_vclk and pp_dpm_dclk outputs do not report
    correct data.
    
    Signed-off-by: Evan Quan <evan.q...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org # 6.0.x

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 87d7c66e49ef..21d89c3302f1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -210,6 +210,8 @@ static struct cmn2asic_mapping 
smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(MEM_TEMP_READ),
        FEA_MAP(ATHUB_MMHUB_PG),
        FEA_MAP(SOC_PCC),
+       [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
commit f95f51a4c3357eabf74fe14ab7daa5b5c0422b27
Author: Felix Kuehling <felix.kuehl...@amd.com>
Date:   Wed Apr 21 21:09:54 2021 -0400

    drm/amdgpu: Add notifier lock for KFD userptrs
    
    Add a per-process MMU notifier lock for processing notifiers from
    userptrs. Use that lock to properly synchronize page table updates with
    MMU notifiers.
    
    Signed-off-by: Felix Kuehling <felix.kuehl...@amd.com>
    Reviewed-by: Xiaogang Chen <xiaogang.c...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index f50e3ba4d7a5..589939631ed4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
+#include <linux/mmu_notifier.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
@@ -65,6 +66,7 @@ struct kgd_mem {
        struct mutex lock;
        struct amdgpu_bo *bo;
        struct dma_buf *dmabuf;
+       struct hmm_range *range;
        struct list_head attachments;
        /* protected by amdkfd_process_info.lock */
        struct ttm_validate_buffer validate_list;
@@ -75,7 +77,7 @@ struct kgd_mem {
 
        uint32_t alloc_flags;
 
-       atomic_t invalid;
+       uint32_t invalid;
        struct amdkfd_process_info *process_info;
 
        struct amdgpu_sync sync;
@@ -131,7 +133,8 @@ struct amdkfd_process_info {
        struct amdgpu_amdkfd_fence *eviction_fence;
 
        /* MMU-notifier related fields */
-       atomic_t evicted_bos;
+       struct mutex notifier_lock;
+       uint32_t evicted_bos;
        struct delayed_work restore_userptr_work;
        struct pid *pid;
        bool block_mmu_notifications;
@@ -180,7 +183,8 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void 
*data);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
 struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                               unsigned long cur_seq, struct kgd_mem *mem);
 #else
 static inline
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -201,7 +205,8 @@ int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct 
amdgpu_bo *bo)
 }
 
 static inline
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                               unsigned long cur_seq, struct kgd_mem *mem)
 {
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 8782916e64a0..0a854bb8b47e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -964,7 +964,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t 
user_addr,
                 * later stage when it is scheduled by another ioctl called by
                 * CRIU master process for the target pid for restore.
                 */
-               atomic_inc(&mem->invalid);
+               mutex_lock(&process_info->notifier_lock);
+               mem->invalid++;
+               mutex_unlock(&process_info->notifier_lock);
                mutex_unlock(&process_info->lock);
                return 0;
        }
@@ -1301,6 +1303,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void 
**process_info,
                        return -ENOMEM;
 
                mutex_init(&info->lock);
+               mutex_init(&info->notifier_lock);
                INIT_LIST_HEAD(&info->vm_list_head);
                INIT_LIST_HEAD(&info->kfd_bo_list);
                INIT_LIST_HEAD(&info->userptr_valid_list);
@@ -1317,7 +1320,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void 
**process_info,
                }
 
                info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
-               atomic_set(&info->evicted_bos, 0);
                INIT_DELAYED_WORK(&info->restore_userptr_work,
                                  amdgpu_amdkfd_restore_userptr_worker);
 
@@ -1372,6 +1374,7 @@ reserve_pd_fail:
                put_pid(info->pid);
 create_evict_fence_fail:
                mutex_destroy(&info->lock);
+               mutex_destroy(&info->notifier_lock);
                kfree(info);
        }
        return ret;
@@ -1496,6 +1499,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device 
*adev,
                cancel_delayed_work_sync(&process_info->restore_userptr_work);
                put_pid(process_info->pid);
                mutex_destroy(&process_info->lock);
+               mutex_destroy(&process_info->notifier_lock);
                kfree(process_info);
        }
 }
@@ -1548,7 +1552,9 @@ int amdgpu_amdkfd_criu_resume(void *p)
 
        mutex_lock(&pinfo->lock);
        pr_debug("scheduling work\n");
-       atomic_inc(&pinfo->evicted_bos);
+       mutex_lock(&pinfo->notifier_lock);
+       pinfo->evicted_bos++;
+       mutex_unlock(&pinfo->notifier_lock);
        if (!READ_ONCE(pinfo->block_mmu_notifications)) {
                ret = -EINVAL;
                goto out_unlock;
@@ -1773,8 +1779,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);
 
-       /* No more MMU notifiers */
-       amdgpu_hmm_unregister(mem->bo);
+       /* Cleanup user pages and MMU notifiers */
+       if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+               amdgpu_hmm_unregister(mem->bo);
+               mutex_lock(&process_info->notifier_lock);
+               amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+               mutex_unlock(&process_info->notifier_lock);
+       }
 
        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
@@ -1864,6 +1875,16 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
         */
        mutex_lock(&mem->process_info->lock);
 
+       /* Lock notifier lock. If we find an invalid userptr BO, we can be
+        * sure that the MMU notifier is no longer running
+        * concurrently and the queues are actually stopped
+        */
+       if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+               mutex_lock(&mem->process_info->notifier_lock);
+               is_invalid_userptr = !!mem->invalid;
+               mutex_unlock(&mem->process_info->notifier_lock);
+       }
+
        mutex_lock(&mem->lock);
 
        domain = mem->domain;
@@ -2241,34 +2262,38 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct 
amdgpu_device *adev,
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
  * cannot do any memory allocations, and cannot take any locks that
- * are held elsewhere while allocating memory. Therefore this is as
- * simple as possible, using atomic counters.
+ * are held elsewhere while allocating memory.
  *
  * It doesn't do anything to the BO itself. The real work happens in
  * restore, where we get updated page addresses. This function only
  * ensures that GPU access to the BO is stopped.
  */
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
-                               struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                               unsigned long cur_seq, struct kgd_mem *mem)
 {
        struct amdkfd_process_info *process_info = mem->process_info;
-       int evicted_bos;
        int r = 0;
 
-       /* Do not process MMU notifications until stage-4 IOCTL is received */
+       /* Do not process MMU notifications during CRIU restore until
+        * KFD_CRIU_OP_RESUME IOCTL is received
+        */
        if (READ_ONCE(process_info->block_mmu_notifications))
                return 0;
 
-       atomic_inc(&mem->invalid);
-       evicted_bos = atomic_inc_return(&process_info->evicted_bos);
-       if (evicted_bos == 1) {
+       mutex_lock(&process_info->notifier_lock);
+       mmu_interval_set_seq(mni, cur_seq);
+
+       mem->invalid++;
+       if (++process_info->evicted_bos == 1) {
                /* First eviction, stop the queues */
-               r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+               r = kgd2kfd_quiesce_mm(mni->mm,
+                                      KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
                if (r)
                        pr_err("Failed to quiesce KFD\n");
                schedule_delayed_work(&process_info->restore_userptr_work,
                        msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
        }
+       mutex_unlock(&process_info->notifier_lock);
 
        return r;
 }
@@ -2285,54 +2310,58 @@ static int update_invalid_user_pages(struct 
amdkfd_process_info *process_info,
        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
        struct ttm_operation_ctx ctx = { false, false };
-       int invalid, ret;
+       uint32_t invalid;
+       int ret = 0;
 
-       /* Move all invalidated BOs to the userptr_inval_list and
-        * release their user pages by migration to the CPU domain
-        */
+       mutex_lock(&process_info->notifier_lock);
+
+       /* Move all invalidated BOs to the userptr_inval_list */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_valid_list,
-                                validate_list.head) {
-               if (!atomic_read(&mem->invalid))
-                       continue; /* BO is still valid */
-
-               bo = mem->bo;
-
-               if (amdgpu_bo_reserve(bo, true))
-                       return -EAGAIN;
-               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-               ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-               amdgpu_bo_unreserve(bo);
-               if (ret) {
-                       pr_err("%s: Failed to invalidate userptr BO\n",
-                              __func__);
-                       return -EAGAIN;
-               }
-
-               list_move_tail(&mem->validate_list.head,
-                              &process_info->userptr_inval_list);
-       }
-
-       if (list_empty(&process_info->userptr_inval_list))
-               return 0; /* All evicted userptr BOs were freed */
+                                validate_list.head)
+               if (mem->invalid)
+                       list_move_tail(&mem->validate_list.head,
+                                      &process_info->userptr_inval_list);
 
        /* Go through userptr_inval_list and update any invalid user_pages */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
-               struct hmm_range *range;
-
-               invalid = atomic_read(&mem->invalid);
+               invalid = mem->invalid;
                if (!invalid)
                        /* BO hasn't been invalidated since the last
-                        * revalidation attempt. Keep its BO list.
+                        * revalidation attempt. Keep its page list.
                         */
                        continue;
 
                bo = mem->bo;
 
+               amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+               mem->range = NULL;
+
+               /* BO reservations and getting user pages (hmm_range_fault)
+                * must happen outside the notifier lock
+                */
+               mutex_unlock(&process_info->notifier_lock);
+
+               /* Move the BO to system (CPU) domain if necessary to unmap
+                * and free the SG table
+                */
+               if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
+                       if (amdgpu_bo_reserve(bo, true))
+                               return -EAGAIN;
+                       amdgpu_bo_placement_from_domain(bo, 
AMDGPU_GEM_DOMAIN_CPU);
+                       ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+                       amdgpu_bo_unreserve(bo);
+                       if (ret) {
+                               pr_err("%s: Failed to invalidate userptr BO\n",
+                                      __func__);
+                               return -EAGAIN;
+                       }
+               }
+
                /* Get updated user pages */
                ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
-                                                  &range);
+                                                  &mem->range);
                if (ret) {
                        pr_debug("Failed %d to get user pages\n", ret);
 
@@ -2345,30 +2374,32 @@ static int update_invalid_user_pages(struct 
amdkfd_process_info *process_info,
                         */
                        if (ret != -EFAULT)
                                return ret;
-               } else {
 
-                       /*
-                        * FIXME: Cannot ignore the return code, must hold
-                        * notifier_lock
-                        */
-                       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+                       ret = 0;
                }
 
+               mutex_lock(&process_info->notifier_lock);
+
                /* Mark the BO as valid unless it was invalidated
                 * again concurrently.
                 */
-               if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
-                       return -EAGAIN;
+               if (mem->invalid != invalid) {
+                       ret = -EAGAIN;
+                       goto unlock_out;
+               }
+               mem->invalid = 0;
        }
 
-       return 0;
+unlock_out:
+       mutex_unlock(&process_info->notifier_lock);
+
+       return ret;
 }
 
 /* Validate invalid userptr BOs
  *
- * Validates BOs on the userptr_inval_list, and moves them back to the
- * userptr_valid_list. Also updates GPUVM page tables with new page
- * addresses and waits for the page table updates to complete.
+ * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
+ * with new page addresses and waits for the page table updates to complete.
  */
 static int validate_invalid_user_pages(struct amdkfd_process_info 
*process_info)
 {
@@ -2439,9 +2470,6 @@ static int validate_invalid_user_pages(struct 
amdkfd_process_info *process_info)
                        }
                }
 
-               list_move_tail(&mem->validate_list.head,
-                              &process_info->userptr_valid_list);
-
                /* Update mapping. If the BO was not validated
                 * (because we couldn't get user pages), this will
                 * clear the page table entries, which will result in
@@ -2457,7 +2485,9 @@ static int validate_invalid_user_pages(struct 
amdkfd_process_info *process_info)
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
-                               atomic_inc(&mem->invalid);
+                               mutex_lock(&process_info->notifier_lock);
+                               mem->invalid++;
+                               mutex_unlock(&process_info->notifier_lock);
                                goto unreserve_out;
                        }
                }
@@ -2477,6 +2507,36 @@ out_no_mem:
        return ret;
 }
 
+/* Confirm that all user pages are valid while holding the notifier lock
+ *
+ * Moves valid BOs from the userptr_inval_list back to userptr_valid_list.
+ */
+static int confirm_valid_user_pages_locked(struct amdkfd_process_info 
*process_info)
+{
+       struct kgd_mem *mem, *tmp_mem;
+       int ret = 0;
+
+       list_for_each_entry_safe(mem, tmp_mem,
+                                &process_info->userptr_inval_list,
+                                validate_list.head) {
+               bool valid = amdgpu_ttm_tt_get_user_pages_done(
+                               mem->bo->tbo.ttm, mem->range);
+
+               mem->range = NULL;
+               if (!valid) {
+                       WARN(!mem->invalid, "Invalid BO not marked invalid");
+                       ret = -EAGAIN;
+                       continue;
+               }
+               WARN(mem->invalid, "Valid BO is marked invalid");
+
+               list_move_tail(&mem->validate_list.head,
+                              &process_info->userptr_valid_list);
+       }
+
+       return ret;
+}
+
 /* Worker callback to restore evicted userptr BOs
  *
  * Tries to update and validate all userptr BOs. If successful and no
@@ -2491,9 +2551,11 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct 
work_struct *work)
                             restore_userptr_work);
        struct task_struct *usertask;
        struct mm_struct *mm;
-       int evicted_bos;
+       uint32_t evicted_bos;
 
-       evicted_bos = atomic_read(&process_info->evicted_bos);
+       mutex_lock(&process_info->notifier_lock);
+       evicted_bos = process_info->evicted_bos;
+       mutex_unlock(&process_info->notifier_lock);
        if (!evicted_bos)
                return;
 
@@ -2516,9 +2578,6 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct 
work_struct *work)
         * and we can just restart the queues.
         */
        if (!list_empty(&process_info->userptr_inval_list)) {
-               if (atomic_read(&process_info->evicted_bos) != evicted_bos)
-                       goto unlock_out; /* Concurrent eviction, try again */
-
                if (validate_invalid_user_pages(process_info))
                        goto unlock_out;
        }
@@ -2527,10 +2586,17 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct 
work_struct *work)
         * be a first eviction that calls quiesce_mm. The eviction
         * reference counting inside KFD will handle this case.
         */
-       if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
-           evicted_bos)
-               goto unlock_out;
-       evicted_bos = 0;
+       mutex_lock(&process_info->notifier_lock);
+       if (process_info->evicted_bos != evicted_bos)
+               goto unlock_notifier_out;
+
+       if (confirm_valid_user_pages_locked(process_info)) {
+               WARN(1, "User pages unexpectedly invalid");
+               goto unlock_notifier_out;
+       }
+
+       process_info->evicted_bos = evicted_bos = 0;
+
        if (kgd2kfd_resume_mm(mm)) {
                pr_err("%s: Failed to resume KFD\n", __func__);
                /* No recovery from this failure. Probably the CP is
@@ -2538,6 +2604,8 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct 
work_struct *work)
                 */
        }
 
+unlock_notifier_out:
+       mutex_unlock(&process_info->notifier_lock);
 unlock_out:
        mutex_unlock(&process_info->lock);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 65715cb395d8..2dadcfe43d03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -105,17 +105,11 @@ static bool amdgpu_hmm_invalidate_hsa(struct 
mmu_interval_notifier *mni,
                                      unsigned long cur_seq)
 {
        struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
        if (!mmu_notifier_range_blockable(range))
                return false;
 
-       mutex_lock(&adev->notifier_lock);
-
-       mmu_interval_set_seq(mni, cur_seq);
-
-       amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
-       mutex_unlock(&adev->notifier_lock);
+       amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);
 
        return true;
 }
@@ -244,9 +238,9 @@ out_free_range:
        return r;
 }
 
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
 {
-       int r;
+       bool r;
 
        r = mmu_interval_read_retry(hmm_range->notifier,
                                    hmm_range->notifier_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index 13ed94d3b01b..e2edcd010ccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -29,12 +29,13 @@
 #include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
 
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               uint64_t start, uint64_t npages, bool readonly,
                               void *owner, struct page **pages,
                               struct hmm_range **phmm_range);
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b4236572eae1..f0e4c7309438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -695,8 +695,19 @@ out_unlock:
        return r;
 }
 
+/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
+ */
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                     struct hmm_range *range)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+       if (gtt && gtt->userptr && range)
+               amdgpu_hmm_range_get_pages_done(range);
+}
+
 /*
- * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table changes
  * Check if the pages backing this ttm range have been invalidated
  *
  * Returns: true if pages are still valid
@@ -714,10 +725,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
 
        WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
 
-       /*
-        * FIXME: Must always hold notifier_lock for this, and must
-        * not ignore the return code.
-        */
        return !amdgpu_hmm_range_get_pages_done(range);
 }
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index b4d8ba2789f3..e2cd5894afc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -159,6 +159,8 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device 
*adev, uint32_t type);
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
                                 struct hmm_range **range);
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                     struct hmm_range *range);
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
                                       struct hmm_range *range);
 #else
@@ -168,6 +170,10 @@ static inline int amdgpu_ttm_tt_get_user_pages(struct 
amdgpu_bo *bo,
 {
        return -EPERM;
 }
+static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                                   struct hmm_range *range)
+{
+}
 static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
                                                     struct hmm_range *range)
 {
commit fe6872adb05e85bde38f2cdec01a0f4cfb826998
Author: Yifan Zhang <yifan1.zh...@amd.com>
Date:   Thu Dec 8 11:55:15 2022 +0800

    drm/amd/display: Add DCN314 display SG Support
    
    Add display SG support for DCN 3.1.4.
    
    Signed-off-by: Yifan Zhang <yifan1.zh...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 77277d90b6e2..50c783e19f5a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1503,6 +1503,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 1):
                case IP_VERSION(3, 1, 2):
                case IP_VERSION(3, 1, 3):
+               case IP_VERSION(3, 1, 4):
                case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
                        init_data.flags.gpu_vm_support = true;
commit 4d2ccd96ac25846749fc58691f5142a966e65b3a
Author: Christian König <christian.koe...@amd.com>
Date:   Wed Nov 16 15:45:36 2022 +0100

    drm/amdgpu: WARN when freeing kernel memory during suspend
    
    When buffers are freed during suspend there is no guarantee that
    they can be re-allocated during resume.
    
    The PSP subsystem seems to be quite buggy regarding this, so add
    a WARN_ON() to point out those bugs.
    
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Reviewed-by: Alex Deucher <alexdeuc...@amd.com>
    Tested-by: Guilherme G. Piccoli <gpicc...@igalia.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2df55cc7e07f..3393c1a6a0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -422,6 +422,8 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 
*gpu_addr,
        if (*bo == NULL)
                return;
 
+       WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
+
        if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);
commit 9c3db58bf8f7d0007049f686ce8c419eed4325d1
Author: Christian König <christian.koe...@amd.com>
Date:   Wed Dec 7 08:47:30 2022 +0100

    drm/amdgpu: fixx NULL pointer deref in gmc_v9_0_get_vm_pte
    
    We not only need to make sure that we have a BO, but also that the BO
    has some backing store.
    
    Fixes: d1a372af1c3d ("drm/amdgpu: Set MTYPE in PTE based on BO flags")
    Signed-off-by: Christian König <christian.koe...@amd.com>
    Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>
    Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
    Reviewed-by: Luben Tuikov <luben.tui...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 50386eb2eec8..08d6cf79fb15 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1185,6 +1185,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device 
*adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
 {
+       struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
 
@@ -1196,7 +1198,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device 
*adev,
                *flags &= ~AMDGPU_PTE_VALID;
        }
 
-       if (mapping->bo_va->base.bo)
+       if (bo && bo->tbo.resource)
                gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
                                             mapping, flags);
 }
commit 47ea20762bb7875a62e10433a3cd5d34e9133f47
Author: Shikang Fan <shikang....@amd.com>
Date:   Thu Dec 8 19:53:14 2022 +0800

    drm/amdgpu: Add an extra evict_resource call during device_suspend.
    
    - evict_resource is taking too long causing sriov full access mode timeout.
      So, add an extra evict_resource in the beginning as an early evict.
    
    Signed-off-by: Shikang Fan <shikang....@amd.com>
    Reviewed-by: Christian König <christian.koe...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cfa411c12072..64660a41d53c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4112,6 +4112,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool 
fbcon)
 
        adev->in_suspend = true;
 
+       /* Evict the majority of BOs before grabbing the full access */
+       r = amdgpu_device_evict_resources(adev);
+       if (r)
+               return r;
+
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_fini_data_exchange(adev);
                r = amdgpu_virt_request_full_gpu(adev, false);
commit 31a2e6cbe8a4eb0d1650fff4b77872b744e14a62
Author: Chris Wilson <chris.p.wil...@intel.com>
Date:   Fri Dec 2 12:28:42 2022 +0000

    drm/i915/migrate: Account for the reserved_space
    
    If the ring is nearly full when calling into emit_pte(), we might
    incorrectly trample the reserved_space when constructing the packet to
    emit the PTEs. This then triggers the GEM_BUG_ON(rq->reserved_space >
    ring->space) when later submitting the request, since the request itself
    doesn't have enough space left in the ring to emit things like
    workarounds, breadcrumbs etc.
    
    v2: Fix the whitespace errors
    
    Testcase: igt@i915_selftests@live_emit_pte_full_ring
    Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/7535
    Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/6889
    Fixes: cf586021642d ("drm/i915/gt: Pipelined page migration")
    Signed-off-by: Chris Wilson <chris.p.wil...@intel.com>
    Signed-off-by: Matthew Auld <matthew.a...@intel.com>
    Cc: Andrzej Hajda <andrzej.ha...@intel.com>
    Cc: Andi Shyti <andi.sh...@linux.intel.com>
    Cc: Nirmoy Das <nirmoy....@intel.com>
    Cc: <sta...@vger.kernel.org> # v5.15+
    Tested-by: Nirmoy Das <nirmoy....@intel.com>
    Reviewed-by: Nirmoy Das <nirmoy....@intel.com>
    Reviewed-by: Andrzej Hajda <andrzej.ha...@intel.com>
    Reviewed-by: Andi Shyti <andi.sh...@linux.intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221202122844.428006-1-matthew.a...@intel.com
    (cherry picked from commit 35168a6c4ed53db4f786858bac23b1474fd7d0dc)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c 
b/drivers/gpu/drm/i915/gt/intel_migrate.c
index b405a04135ca..b783f6f740c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -342,6 +342,16 @@ static int emit_no_arbitration(struct i915_request *rq)
        return 0;
 }
 
+static int max_pte_pkt_size(struct i915_request *rq, int pkt)
+{
+       struct intel_ring *ring = rq->ring;
+
+       pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) 
+ 5);
+       pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+
+       return pkt;
+}
+
 static int emit_pte(struct i915_request *rq,
                    struct sgt_dma *it,
                    enum i915_cache_level cache_level,
@@ -388,8 +398,7 @@ static int emit_pte(struct i915_request *rq,
                return PTR_ERR(cs);
 
        /* Pack as many PTE updates as possible into a single MI command */
-       pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
-       pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+       pkt = max_pte_pkt_size(rq, dword_length);
 
        hdr = cs;
        *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
@@ -422,8 +431,7 @@ static int emit_pte(struct i915_request *rq,
                                }
                        }
 
-                       pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 
5);
-                       pkt = min_t(int, pkt, (ring->size - ring->emit) / 
sizeof(u32) + 5);
+                       pkt = max_pte_pkt_size(rq, dword_rem);
 
                        hdr = cs;
                        *cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
commit 3153eebb7a76e663ac76d6670dc113296de96622
Author: Khaled Almahallawy <khaled.almahall...@intel.com>
Date:   Wed Nov 23 14:09:26 2022 -0800

    drm/i915/display: Don't disable DDI/Transcoder when setting phy test pattern
    
    Bspecs has updated recently to remove the restriction to disable
    DDI/Transcoder before setting PHY test pattern. This update is to
    address PHY compliance test failures observed on a port with LTTPR.
    The issue is that when Transc. is disabled, the main link signals fed
    to LTTPR will be dropped invalidating link training, which will affect
    the quality of the phy test pattern when the transcoder is enabled again.
    
    v2: Update commit message (Clint)
    v3: Add missing Signed-off in v2
    v4: Update Bspec and commit message for pre-gen12 (Jani)
    
    Bspec: 50482, 7555
    Fixes: 8cdf72711928 ("drm/i915/dp: Program vswing, pre-emphasis, 
test-pattern")
    Cc: Imre Deak <imre.d...@intel.com>
    Cc: Clint Taylor <clinton.a.tay...@intel.com>
    CC: Jani Nikula <jani.nik...@intel.com>
    Tested-by: Khaled Almahallawy <khaled.almahall...@intel.com>
    Reviewed-by: Clint Taylor <clinton.a.tay...@intel.com>
    Signed-off-by: Khaled Almahallawy <khaled.almahall...@intel.com>
    Signed-off-by: Jani Nikula <jani.nik...@intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221123220926.170034-1-khaled.almahall...@intel.com
    (cherry picked from commit be4a847652056b067d6dc6fe0fc024a9e2e987ca)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/display/intel_dp.c 
b/drivers/gpu/drm/i915/display/intel_dp.c
index 67089711d9e2..75070eb07d4b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -3679,61 +3679,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp 
*intel_dp,
        }
 }
 
-static void
-intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
-                                 const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-       enum pipe pipe = crtc->pipe;
-       u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
-
-       trans_ddi_func_ctl_value = intel_de_read(dev_priv,
-                                                TRANS_DDI_FUNC_CTL(pipe));
-       trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
-       dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
-
-       trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
-                                     TGL_TRANS_DDI_PORT_MASK);
-       trans_conf_value &= ~PIPECONF_ENABLE;
-       dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
-
-       intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
-                      trans_ddi_func_ctl_value);
-       intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
-}
-
-static void
-intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
-                                const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum port port = dig_port->base.port;
-       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-       enum pipe pipe = crtc->pipe;
-       u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
-
-       trans_ddi_func_ctl_value = intel_de_read(dev_priv,
-                                                TRANS_DDI_FUNC_CTL(pipe));
-       trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
-       dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
-
-       trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
-                                   TGL_TRANS_DDI_SELECT_PORT(port);
-       trans_conf_value |= PIPECONF_ENABLE;
-       dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
-
-       intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
-       intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
-                      trans_ddi_func_ctl_value);
-}
-
 static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
                                         const struct intel_crtc_state 
*crtc_state)
 {
@@ -3752,14 +3697,10 @@ static void intel_dp_process_phy_request(struct 
intel_dp *intel_dp,
        intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
                                  link_status);
 
-       intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
-
        intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
 
        intel_dp_phy_pattern_update(intel_dp, crtc_state);
 
-       intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
-
        drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
                          intel_dp->train_set, crtc_state->lane_count);
 
commit b2e9e6a9cb87ce4a82fb106ae16c94639835fd47
Author: Ville Syrjälä <ville.syrj...@linux.intel.com>
Date:   Thu Dec 8 00:52:19 2022 +0200

    drm/i915: Fix VLV/CHV HDMI/DP audio enable
    
    Despite what I claimed in commit c3c5dc1d9224
    ("drm/i915/audio: Do the vblank waits") the vblank
    interrupts are in fact not enabled yet when we do the
    audio enable sequence on VLV/CHV (all other platforms are
    fine).
    
    Reorder the enable sequence on VLV/CHV to match that of the
    other platforms so that the audio enable happens after the
    pipe has been enabled.
    
    Fixes: c3c5dc1d9224 ("drm/i915/audio: Do the vblank waits")
    Signed-off-by: Ville Syrjälä <ville.syrj...@linux.intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221207225219.29060-1-ville.syrj...@linux.intel.com
    Reviewed-by: Jani Nikula <jani.nik...@intel.com>
    (cherry picked from commit a467a243554a64b418c14d7531a3b18c03d53bff)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c 
b/drivers/gpu/drm/i915/display/g4x_dp.c
index 3593938dcd87..24ef36ec2d3d 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -673,8 +673,6 @@ static void intel_enable_dp(struct intel_atomic_state 
*state,
        intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
        intel_dp_start_link_train(intel_dp, pipe_config);
        intel_dp_stop_link_train(intel_dp, pipe_config);
-
-       intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
 
 static void g4x_enable_dp(struct intel_atomic_state *state,
@@ -683,6 +681,7 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
                          const struct drm_connector_state *conn_state)
 {
        intel_enable_dp(state, encoder, pipe_config, conn_state);
+       intel_audio_codec_enable(encoder, pipe_config, conn_state);
        intel_edp_backlight_on(pipe_config, conn_state);
 }
 
@@ -691,6 +690,7 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
 {
+       intel_audio_codec_enable(encoder, pipe_config, conn_state);
        intel_edp_backlight_on(pipe_config, conn_state);
 }
 
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c 
b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 121caeaa409b..c3580d96765c 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -157,10 +157,8 @@ static void intel_hdmi_get_config(struct intel_encoder 
*encoder,
                             &pipe_config->infoframes.hdmi);
 }
 
-static void g4x_enable_hdmi(struct intel_atomic_state *state,
-                           struct intel_encoder *encoder,
-                           const struct intel_crtc_state *pipe_config,
-                           const struct drm_connector_state *conn_state)
+static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
+                                const struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -175,6 +173,16 @@ static void g4x_enable_hdmi(struct intel_atomic_state 
*state,
 
        intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
        intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+}
+
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
+                           const struct intel_crtc_state *pipe_config,
+                           const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       g4x_hdmi_enable_port(encoder, pipe_config);
 
        drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
                    !pipe_config->has_hdmi_sink);
@@ -294,6 +302,11 @@ static void vlv_enable_hdmi(struct intel_atomic_state 
*state,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+                   !pipe_config->has_hdmi_sink);
+       intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
 
 static void intel_disable_hdmi(struct intel_atomic_state *state,
@@ -415,7 +428,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state 
*state,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
-       g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+       g4x_hdmi_enable_port(encoder, pipe_config);
 
        vlv_wait_port_ready(dev_priv, dig_port, 0x0);
 }
@@ -492,7 +505,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state 
*state,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
-       g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+       g4x_hdmi_enable_port(encoder, pipe_config);
 
        vlv_wait_port_ready(dev_priv, dig_port, 0x0);
 
commit 1d4624cd72b912b2680c08d0be48338a1629a858
Author: Alex Deucher <alexander.deuc...@amd.com>
Date:   Mon Nov 21 15:52:19 2022 -0500

    drm/amdgpu: handle polaris10/11 overlap asics (v2)
    
    Some special polaris 10 chips overlap with the polaris11
    DID range.  Handle this properly in the driver.
    
    v2: use local flags for other function calls.
    
    Acked-by: Luben Tuikov <luben.tui...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7383272c6a3a..b4f2d61ea0d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2039,6 +2039,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
                         "See modparam exp_hw_support\n");
                return -ENODEV;
        }
+       /* differentiate between P10 and P11 asics with the same DID */
+       if (pdev->device == 0x67FF &&
+           (pdev->revision == 0xE3 ||
+            pdev->revision == 0xE7 ||
+            pdev->revision == 0xF3 ||
+            pdev->revision == 0xF7)) {
+               flags &= ~AMD_ASIC_MASK;
+               flags |= CHIP_POLARIS10;
+       }
 
        /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU 
mapping,
         * however, SME requires an indirect IOMMU mapping because the 
encryption
@@ -2108,12 +2117,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
        pci_set_drvdata(pdev, ddev);
 
-       ret = amdgpu_driver_load_kms(adev, ent->driver_data);
+       ret = amdgpu_driver_load_kms(adev, flags);
        if (ret)
                goto err_pci;
 
 retry_init:
-       ret = drm_dev_register(ddev, ent->driver_data);
+       ret = drm_dev_register(ddev, flags);
        if (ret == -EAGAIN && ++retry <= 3) {
                DRM_INFO("retry init %d\n", retry);
                /* Don't request EX mode too frequently which is attacking */
commit 81d0bcf9900932633d270d5bc4a54ff599c6ebdb
Author: Alex Deucher <alexander.deuc...@amd.com>
Date:   Wed Dec 7 11:08:53 2022 -0500

    drm/amdgpu: make display pinning more flexible (v2)
    
    Only apply the static threshold for Stoney and Carrizo.
    This hardware has certain requirements that don't allow
    mixing of GTT and VRAM.  Newer asics do not have these
    requirements so we should be able to be more flexible
    with where buffers end up.
    
    Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2270
    Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2291
    Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2255
    Acked-by: Luben Tuikov <luben.tui...@amd.com>
    Reviewed-by: Christian König <christian.koe...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
    Cc: sta...@vger.kernel.org

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 919bbea2e3ac..2df55cc7e07f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1506,7 +1506,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                            uint32_t domain)
 {
-       if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+       if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
+           ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == 
CHIP_STONEY))) {
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
                        domain = AMDGPU_GEM_DOMAIN_GTT;
commit 2165359b7ed4e0b93fc23f49ede38d76e91fffe1
Author: Colin Ian King <colin.i.k...@gmail.com>
Date:   Thu Dec 8 12:18:54 2022 +0000

    drm/amd/display: Fix spelling mistake: "dram_clk_chanage" -> 
"dram_clk_change"
    
    There is a spelling mistake in the struct field dram_clk_chanage. Fix it.
    
    Signed-off-by: Colin Ian King <colin.i.k...@gmail.com>
    Signed-off-by: Hamza Mahfooz <hamza.mahf...@amd.com>
    Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 0f746bb4e500..d51f1ce02874 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -55,7 +55,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
 
        s = &wm->sets[1];
        s->wm_set = 1;
@@ -65,7 +65,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
 
        s = &wm->sets[2];
        s->wm_set = 2;
@@ -75,7 +75,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
 
        s = &wm->sets[3];
        s->wm_set = 3;
@@ -85,7 +85,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
 }
 
 void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c8ec11839b4d..fe2023f18b7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -159,7 +159,7 @@ static void dcn10_log_hubbub_state(struct dc *dc,
                DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
                DTN_INFO_MICRO_SEC(s->sr_enter);
                DTN_INFO_MICRO_SEC(s->sr_exit);
-               DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
+               DTN_INFO_MICRO_SEC(s->dram_clk_change);
                DTN_INFO("\n");
        }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index e8b6065fffad..a0f8e31d2adc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -83,7 +83,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, 
char *pBuf, unsigned i
        memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
        dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
 
-       chars_printed = snprintf_count(pBuf, remaining_buffer, 
"wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n");
+       chars_printed = snprintf_count(pBuf, remaining_buffer, 
"wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_change\n");
        remaining_buffer -= chars_printed;
        pBuf += chars_printed;
 
@@ -98,7 +98,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, 
char *pBuf, unsigned i
                        (s->pte_meta_urgent * frac) / ref_clk_mhz / frac, 
(s->pte_meta_urgent * frac) / ref_clk_mhz % frac,
                        (s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter 
* frac) / ref_clk_mhz % frac,
                        (s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * 
frac) / ref_clk_mhz % frac,
-                       (s->dram_clk_chanage * frac) / ref_clk_mhz / frac, 
(s->dram_clk_chanage * frac) / ref_clk_mhz % frac);
+                       (s->dram_clk_change * frac) / ref_clk_mhz / frac, 
(s->dram_clk_change * frac) / ref_clk_mhz % frac);
                remaining_buffer -= chars_printed;
                pBuf += chars_printed;
        }
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index aacb1fb5c73e..24bd93219936 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -500,7 +500,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
 
        s = &wm->sets[1];
        s->wm_set = 1;
@@ -511,7 +511,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
 
        s = &wm->sets[2];
        s->wm_set = 2;
@@ -522,7 +522,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
 
        s = &wm->sets[3];
        s->wm_set = 3;
@@ -533,7 +533,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
        }
-       s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+       s->dram_clk_change = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
 }
 
 void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index c5e200d09038..aeb0e0d9b70a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -635,7 +635,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
-                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 
&s->dram_clk_chanage);
+                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 
&s->dram_clk_change);
 
        s = &wm->sets[1];
        s->wm_set = 1;
@@ -649,7 +649,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
-                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 
&s->dram_clk_chanage);
+                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 
&s->dram_clk_change);
 
        s = &wm->sets[2];
        s->wm_set = 2;
@@ -663,7 +663,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
-                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 
&s->dram_clk_chanage);
+                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 
&s->dram_clk_change);
 
        s = &wm->sets[3];
        s->wm_set = 3;
@@ -677,7 +677,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
-                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 
&s->dram_clk_chanage);
+                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 
&s->dram_clk_change);
 }
 
 static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 5947c2cb0f30..9501403a48a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -865,7 +865,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
-                        DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 
&s->dram_clk_chanage);
+                        DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 
&s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 
&s->usr_retrain);
@@ -885,7 +885,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
-                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 
&s->dram_clk_chanage);
+                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 
&s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 
&s->usr_retrain);
@@ -905,7 +905,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
-                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 
&s->dram_clk_chanage);
+                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 
&s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 
&s->usr_retrain);
@@ -925,7 +925,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
-                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 
&s->dram_clk_chanage);
+                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 
&s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 
&s->usr_retrain);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index f2e1fcb668fb..5b0265c0df61 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -46,7 +46,7 @@ struct dcn_hubbub_wm_set {
        uint32_t pte_meta_urgent;
        uint32_t sr_enter;
        uint32_t sr_exit;
-       uint32_t dram_clk_chanage;
+       uint32_t dram_clk_change;
        uint32_t usr_retrain;
        uint32_t fclk_pstate_change;
 };
commit d4d4c6fbae3837623708594a7499f40673fb0692
Author: Umesh Nerlige Ramappa <umesh.nerlige.rama...@intel.com>
Date:   Wed Nov 23 15:53:42 2022 -0800

    drm/i915/perf: Do not parse context image for HSW
    
    An earlier commit introduced a mechanism to parse the context image to
    find the OA context control offset. This resulted in an NPD on haswell
    when gem_context was passed into i915_perf_open_ioctl params. Haswell
    does not support logical ring contexts, so ensure that the context image
    is parsed only for platforms with logical ring contexts and also
    validate lrc_reg_state.
    
    v2: Fix build failure
    v3: Fix checkpatch error
    
    Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/7432
    Fixes: a5c3a3cbf029 ("drm/i915/perf: Determine gen12 oa ctx offset at 
runtime")
    Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.rama...@intel.com>
    Reviewed-by: Ashutosh Dixit <ashutosh.di...@intel.com>
    Signed-off-by: John Harrison <john.c.harri...@intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221123235342.713068-1-umesh.nerlige.rama...@intel.com
    (cherry picked from commit 95c713d722017b26e301303713d638e0b95b1f68)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 00e09bb18b13..125b6ca25a75 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1383,6 +1383,9 @@ static u32 oa_context_image_offset(struct intel_context 
*ce, u32 reg)
        u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
        u32 *state = ce->lrc_reg_state;
 
+       if (drm_WARN_ON(&ce->engine->i915->drm, !state))
+               return U32_MAX;
+
        for (offset = 0; offset < len; ) {
                if (IS_MI_LRI_CMD(state[offset])) {
                        /*
@@ -1447,7 +1450,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream 
*stream)
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
-       if (engine_supports_mi_query(stream->engine)) {
+       if (engine_supports_mi_query(stream->engine) &&
+           HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
                /*
                 * We are enabling perf query here. If we don't find the context
                 * offset here, just return an error.
commit 449a0ef584d42ed24b7432c899863eaabe2583b5
Author: Miaoqian Lin <linmq...@gmail.com>
Date:   Wed Dec 7 15:29:09 2022 +0400

    drm/i915: Fix documentation for intel_uncore_forcewake_put__locked
    
    intel_uncore_forcewake_put__locked() is used to release a reference.
    
    Fixes: a6111f7b6604 ("drm/i915: Reduce locking in execlist command 
submission")
    Signed-off-by: Miaoqian Lin <linmq...@gmail.com>
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221207112909.2655251-1-linmq...@gmail.com
    (cherry picked from commit 955f4d7176eb154db587ae162ec2b392dc8d5f27)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/intel_uncore.c 
b/drivers/gpu/drm/i915/intel_uncore.c
index 8006a6c61466..614013745fca 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -824,9 +824,9 @@ void intel_uncore_forcewake_flush(struct intel_uncore 
*uncore,
 }
 
 /**
- * intel_uncore_forcewake_put__locked - grab forcewake domain references
+ * intel_uncore_forcewake_put__locked - release forcewake domain references
  * @uncore: the intel_uncore structure
- * @fw_domains: forcewake domains to get reference on
+ * @fw_domains: forcewake domains to put references
  *
  * See intel_uncore_forcewake_put(). This variant places the onus
  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
commit 2c1da39008fee00596ed33baeacaffa0dc62df25
Author: Matt Roper <matthew.d.ro...@intel.com>
Date:   Mon Nov 28 15:30:10 2022 -0800

    drm/i915/gt: Correct kerneldoc for intel_gt_mcr_wait_for_reg()
    
    The kerneldoc function name was not updated when this function was
    converted to a non-fw form.
    
    Fixes: 41f425adbce9 ("drm/i915/gt: Manage uncore->lock while waiting on MCR 
register")
    Reported-by: kernel test robot <l...@intel.com>
    Signed-off-by: Matt Roper <matthew.d.ro...@intel.com>
    Reviewed-by: Balasubramani Vivekanandan 
<balasubramani.vivekanan...@intel.com>
    Link: 
https://patchwork.freedesktop.org/patch/msgid/20221128233014.4000136-2-matthew.d.ro...@intel.com
    (cherry picked from commit 03b713d029bd17a1ed426590609af79843db95e2)
    Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c 
b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index d9a8ff9e5e57..ea86c1ab5dc5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -702,7 +702,7 @@ void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, 
unsigned int dss,
 }
 
 /**
- * intel_gt_mcr_wait_for_reg_fw - wait until MCR register matches expected 
state
+ * intel_gt_mcr_wait_for_reg - wait until MCR register matches expected state
  * @gt: GT structure
  * @reg: the register to read
  * @mask: mask to apply to register value

Reply via email to