On Wed, Aug 13, 2025 at 2:23 PM Sundararaju, Sathishkumar
<sathishkumar.sundarar...@amd.com> wrote:
>
> Hi Alex, Hi David,
>
> I see David's concern, but his suggestion still won't solve the problem,
> and neither will the current form of the patch. Reason:
>
> The emitted fence count and total submission count are fast transients
> which frequently become 0 in between video decodes (between jobs). Even
> with the atomics and locks there can be a switch of the video power profile;
> in the current form of the patch that window is minimized, but it can still
> happen if stress tested. But the power state of any instance becoming zero

Can you explain how this can happen?  I'm not seeing it.  If it is
possible, maybe it would be easier to just split the profile and
powergating into separate handlers.  The profile one would be global
and the powergating one would be per instance.  See the attached
patches.

Alex

> can be a sure-shot indication of a break in a video decode. The mistake in
> my patch was using a per instance mutex; I should have used a common
> global mutex, which then covers the situation David is trying to bring out.
>
> Using one global vcn.pg_lock for idle and begin_use and using flags to
> track power state could help us totally avoid this situation.
>
> Regards,
>
> Sathish
>
> On 8/13/2025 11:46 PM, Wu, David wrote:
> > On 8/13/2025 12:51 PM, Alex Deucher wrote:
> >> On Wed, Aug 13, 2025 at 12:39 PM Wu, David <david...@amd.com> wrote:
> >>> Hi Alex,
> >>>
> >>> The addition of total_submission_cnt should work - in that
> >>> it is unlikely to have a context switch right after the begin_use().
> >>> The suggestion of moving it inside the lock (which I prefer in case
> >>> someone adds more before the lock and it is not reviewed thoroughly)
> >>> - up to you to decide.
> >>>
> >>> Reviewed-by: David (Ming Qiang) Wu <david....@amd.com>
> >>>
> >>> Thanks,
> >>> David
> >>> On 8/13/2025 9:45 AM, Alex Deucher wrote:
> >>>> If there are multiple instances of the VCN running,
> >>>> we may end up switching the video profile while another
> >>>> instance is active because we only take into account
> >>>> the current instance's submissions.  Look at all
> >>>> outstanding fences for the video profile.
> >>>>
> >>>> v2: drop early exit in begin_use()
> >>>> v3: handle possible race between begin_use() and the work handler
> >>>>
> >>>> Fixes: 3b669df92c85 ("drm/amdgpu/vcn: adjust workload profile handling")
> >>>> Reviewed-by: Sathishkumar S <sathishkumar.sundarar...@amd.com> (v1)
> >>>> Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
> >>>> ---
> >>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 40 ++++++++++++++++++++++------------------
> >>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  1 +
> >>>>   2 files changed, 21 insertions(+), 20 deletions(-)
> >>>>
> >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> >>>> index 9a76e11d1c184..593c1ddf8819b 100644
> >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> >>>> @@ -415,19 +415,25 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
> >>>>       struct amdgpu_vcn_inst *vcn_inst =
> >>>>               container_of(work, struct amdgpu_vcn_inst, idle_work.work);
> >>>>       struct amdgpu_device *adev = vcn_inst->adev;
> >>>> -     unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
> >>>> -     unsigned int i = vcn_inst->inst, j;
> >>>> +     unsigned int total_fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
> >>>> +     unsigned int i, j;
> >>>>       int r = 0;
> >>>>
> >>>> -     if (adev->vcn.harvest_config & (1 << i))
> >>>> +     if (adev->vcn.harvest_config & (1 << vcn_inst->inst))
> >>>>               return;
> >>>>
> >>>> -     for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
> >>>> -             fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
> >>>> +     for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
> >>>> +             struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
> >>>> +
> >>>> +             for (j = 0; j < v->num_enc_rings; ++j)
> >>>> +                     fence[i] += amdgpu_fence_count_emitted(&v->ring_enc[j]);
> >>>> +             fence[i] += amdgpu_fence_count_emitted(&v->ring_dec);
> >>>> +             total_fences += fence[i];
> >>>> +     }
> >>>>
> >>>>       /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
> >>>>       if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
> >>>> -         !adev->vcn.inst[i].using_unified_queue) {
> >>>> +         !vcn_inst->using_unified_queue) {
> >>>>               struct dpg_pause_state new_state;
> >>>>
> >>>>               if (fence[i] ||
> >>>> @@ -436,18 +442,18 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
> >>>>               else
> >>>>                       new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
> >>>>
> >>>> -             adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
> >>>> +             vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
> >>>>       }
> >>>>
> >>>> -     fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
> >>>> -     fences += fence[i];
> >>>> -
> >>>> -     if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
> >>>> +     if (!fence[vcn_inst->inst] && !atomic_read(&vcn_inst->total_submission_cnt)) {
> >>>> +             /* This is specific to this instance */
> >>>>               mutex_lock(&vcn_inst->vcn_pg_lock);
> >>>>               vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
> >>>>               mutex_unlock(&vcn_inst->vcn_pg_lock);
> >>>>               mutex_lock(&adev->vcn.workload_profile_mutex);
> >>>> -             if (adev->vcn.workload_profile_active) {
> >>>> +             /* This is global and depends on all VCN instances */
> >>>> +             if (adev->vcn.workload_profile_active && !total_fences &&
> >>>> +                 !atomic_read(&adev->vcn.total_submission_cnt)) {
> >>>>                       r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
> >>>>                                                           false);
> >>>>                       if (r)
> >>>> @@ -467,16 +473,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
> >>>>       int r = 0;
> >>>>
> >>>>       atomic_inc(&vcn_inst->total_submission_cnt);
> >>>> +     atomic_inc(&adev->vcn.total_submission_cnt);
> >>> move this addition down inside the mutex lock
> >>>>
> >>>>       cancel_delayed_work_sync(&vcn_inst->idle_work);
> >>>>
> >>>> -     /* We can safely return early here because we've cancelled the
> >>>> -      * the delayed work so there is no one else to set it to false
> >>>> -      * and we don't care if someone else sets it to true.
> >>>> -      */
> >>>> -     if (adev->vcn.workload_profile_active)
> >>>> -             goto pg_lock;
> >>>> -
> >>>>       mutex_lock(&adev->vcn.workload_profile_mutex);
> >>> move to here:
> >>>      atomic_inc(&adev->vcn.total_submission_cnt);
> >>> I think this should work for multiple instances.
> >> Why does this need to be protected by the mutex?
> > hmm.. OK - no need and it is actually better before the mutex.
> >
> > David
> >> Alex
> >>
> >>> David
> >>>>       if (!adev->vcn.workload_profile_active) {
> >>>>               r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
> >>>> @@ -487,7 +487,6 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
> >>>>       }
> >>>>       mutex_unlock(&adev->vcn.workload_profile_mutex);
> >>>>
> >>>> -pg_lock:
> >>>>       mutex_lock(&vcn_inst->vcn_pg_lock);
> >>>>       vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
> >>>>
> >>>> @@ -528,6 +527,7 @@ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
> >>>>               atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
> >>>>
> >>>>       atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
> >>>> +     atomic_dec(&ring->adev->vcn.total_submission_cnt);
> >>>>
> >>>>       schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
> >>>>                             VCN_IDLE_TIMEOUT);
> >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> >>>> index b3fb1d0e43fc9..febc3ce8641ff 100644
> >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> >>>> @@ -352,6 +352,7 @@ struct amdgpu_vcn {
> >>>>
> >>>>       uint16_t inst_mask;
> >>>>       uint8_t num_inst_per_aid;
> >>>> +     atomic_t total_submission_cnt;
> >>>>
> >>>>       /* IP reg dump */
> >>>>       uint32_t *ip_dump;
> >
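For illustration, a minimal userspace sketch of the race window described above -- this is not kernel code, every name in it is made up, and the workload_profile_mutex is omitted for brevity. It shows how a per-instance-only idle check can turn the shared video profile off while another instance still has work in flight, and how a device-wide count (like adev->vcn.total_submission_cnt in the attached patches) closes that window:

/* build with: cc -std=c11 race_sketch.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_INST 2

static atomic_int inst_cnt[NUM_INST]; /* like inst->total_submission_cnt */
static atomic_int global_cnt;         /* like adev->vcn.total_submission_cnt */
static bool profile_active;

static void begin_use(int inst)
{
        atomic_fetch_add(&global_cnt, 1);
        atomic_fetch_add(&inst_cnt[inst], 1);
        profile_active = true;                  /* video profile on */
}

static void end_use(int inst)
{
        atomic_fetch_sub(&inst_cnt[inst], 1);
        atomic_fetch_sub(&global_cnt, 1);
}

static void idle_work(int inst)
{
        if (atomic_load(&inst_cnt[inst]))
                return;                         /* this instance is still busy */
        /* A per-instance check alone would drop the profile here; the
         * global check keeps it on while any instance has work pending. */
        if (!atomic_load(&global_cnt))
                profile_active = false;         /* video profile off */
}

int main(void)
{
        begin_use(0);  /* instance 0 starts a decode */
        idle_work(1);  /* instance 1 is idle and runs its worker */
        printf("profile_active = %d (stays on, instance 0 busy)\n", profile_active);
        end_use(0);
        idle_work(0);
        printf("profile_active = %d (off, nothing pending)\n", profile_active);
        return 0;
}
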
From 8599112cfc94e8bcf472db38627fd9b3261b107a Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deuc...@amd.com>
Date: Tue, 12 Aug 2025 11:38:09 -0400
Subject: [PATCH 1/2] drm/amdgpu/vcn: fix video profile race condition (v4)

If there are multiple instances of the VCN running,
we may end up switching the video profile while another
instance is active because we only take into account
the current instance's submissions.  Split the idle work
into two handlers, one for global state and one for per
instance state.

v2: drop early exit in begin_use()
v3: handle possible race between begin_use() and the work handler
v4: split into two handlers, one global and one per instance

Fixes: 3b669df92c85 ("drm/amdgpu/vcn: adjust workload profile handling")
Reviewed-by: David (Ming Qiang) Wu <david....@amd.com>
Reviewed-by: Sathishkumar S <sathishkumar.sundarar...@amd.com> (v1)
Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 56 +++++++++++++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  4 ++
 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   |  3 ++
 drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c   |  2 +
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c   |  3 ++
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c |  3 ++
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c |  3 ++
 drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c |  3 ++
 drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c |  3 ++
 9 files changed, 63 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index fd8ebf4b5a824..ebf730f8ace43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -91,7 +91,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
 MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
 MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
 
-static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_instance_work_handler(struct work_struct *work);
 static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
 
 int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
@@ -137,7 +137,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
 	mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
 	mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
 	atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
-	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_instance_work_handler);
 	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
@@ -408,43 +408,61 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
 	return 0;
 }
 
-static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+static void amdgpu_vcn_instance_work_handler(struct work_struct *work)
 {
 	struct amdgpu_vcn_inst *vcn_inst =
 		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
 	struct amdgpu_device *adev = vcn_inst->adev;
-	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
-	unsigned int i = vcn_inst->inst, j;
-	int r = 0;
+	unsigned int fences = 0;
+	unsigned int i;
 
-	if (adev->vcn.harvest_config & (1 << i))
+	if (adev->vcn.harvest_config & (1 << vcn_inst->inst))
 		return;
 
-	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
-		fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
+	for (i = 0; i < vcn_inst->num_enc_rings; ++i)
+		fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);
+	fences += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
 
 	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
-	    !adev->vcn.inst[i].using_unified_queue) {
+	    !vcn_inst->using_unified_queue) {
 		struct dpg_pause_state new_state;
 
-		if (fence[i] ||
+		if (fences ||
 		    unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 		else
 			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-		adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
+		vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
 	}
 
-	fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
-	fences += fence[i];
-
 	if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
 		mutex_lock(&vcn_inst->vcn_pg_lock);
 		vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
 		mutex_unlock(&vcn_inst->vcn_pg_lock);
-		mutex_lock(&adev->vcn.workload_profile_mutex);
+	} else {
+		schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
+	}
+}
+
+void amdgpu_vcn_global_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, vcn.global_work.work);
+	unsigned int total_fences = 0;
+	unsigned int i, j;
+	int r = 0;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
+
+		for (j = 0; j < v->num_enc_rings; ++j)
+			total_fences += amdgpu_fence_count_emitted(&v->ring_enc[j]);
+		total_fences += amdgpu_fence_count_emitted(&v->ring_dec);
+	}
+
+	if (!total_fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 		if (adev->vcn.workload_profile_active) {
 			r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 							    false);
@@ -454,7 +472,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 		}
 		mutex_unlock(&adev->vcn.workload_profile_mutex);
 	} else {
-		schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->vcn.global_work, VCN_IDLE_TIMEOUT);
 	}
 }
 
@@ -464,8 +482,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
 	int r = 0;
 
+	atomic_inc(&adev->vcn.total_submission_cnt);
 	atomic_inc(&vcn_inst->total_submission_cnt);
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	cancel_delayed_work_sync(&vcn_inst->idle_work);
 
 	/* We can safely return early here because we've cancelled the
@@ -525,8 +545,10 @@ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 	    !adev->vcn.inst[ring->me].using_unified_queue)
 		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 
+	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 	atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
 
+	schedule_delayed_work(&adev->vcn.global_work, VCN_IDLE_TIMEOUT);
 	schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
 			      VCN_IDLE_TIMEOUT);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index b3fb1d0e43fc9..70d97a2ac7719 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -352,6 +352,8 @@ struct amdgpu_vcn {
 
 	uint16_t inst_mask;
 	uint8_t num_inst_per_aid;
+	atomic_t total_submission_cnt;
+	struct delayed_work global_work;
 
 	/* IP reg dump */
 	uint32_t *ip_dump;
@@ -565,4 +567,6 @@ int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
 			     const struct amdgpu_hwip_reg_entry *reg, u32 count);
 void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
 void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_global_work_handler(struct work_struct *work);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index b115137ab2d69..068bb17c51c20 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -139,6 +139,8 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	volatile struct amdgpu_fw_shared *fw_shared;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, amdgpu_vcn_global_work_handler);
+
 	/* VCN DEC TRAP */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
@@ -322,6 +324,7 @@ static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
 
 	cancel_delayed_work_sync(&vinst->idle_work);
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
 	    (vinst->cur_state != AMD_PG_STATE_GATE &&
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 95173156f956a..280025a3a0cb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -177,6 +177,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
 	int vcn_doorbell_index = 0;
 	struct amdgpu_device *adev = ip_block->adev;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, amdgpu_vcn_global_work_handler);
 	/*
 	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
 	 * Formula:
@@ -444,6 +445,7 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index ae9d408e50c37..38010f42bcee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -184,6 +184,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i, r;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, amdgpu_vcn_global_work_handler);
+
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
@@ -370,6 +372,7 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index d18aca3d663b9..0669df8d87314 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -161,6 +161,8 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_ring *ring;
 	int i, r, vcn_inst;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, amdgpu_vcn_global_work_handler);
+
 	/* VCN DEC TRAP */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 			      VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
@@ -373,6 +375,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 75c884a8f556b..65b34f9bee5a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -148,6 +148,8 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i, r;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, amdgpu_vcn_global_work_handler);
+
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
 
@@ -320,6 +322,7 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 455f829b8bb99..882da3591ad4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -128,6 +128,8 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i, r;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, amdgpu_vcn_global_work_handler);
+
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 		volatile struct amdgpu_vcn5_fw_shared *fw_shared;
 
@@ -283,6 +285,7 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 7cb21e2b4eb0e..213b24b1d0cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -142,6 +142,8 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_ring *ring;
 	int i, r, vcn_inst;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, amdgpu_vcn_global_work_handler);
+
 	/* VCN UNIFIED TRAP */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 			      VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
@@ -318,6 +320,7 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 
-- 
2.50.1
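For orientation between the two patches: patch 1 pairs the device-wide submission count with the global delayed work so that begin_use() cancels the global handler before enabling the profile and end_use() re-arms it, while the handler only drops the profile once nothing is pending. Below is a hedged sketch of that pairing; struct example_dev and EXAMPLE_IDLE_TIMEOUT are hypothetical stand-ins, not amdgpu names. Patch 2 that follows switches VCN 2.5 onto this same global work.

/* Illustrative kernel-style sketch only, under the assumptions above. */
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_dev {
        atomic_t total_submission_cnt;   /* device-wide activity count */
        struct delayed_work global_work; /* global idle/profile check */
};

#define EXAMPLE_IDLE_TIMEOUT msecs_to_jiffies(1000)

static void example_begin_use(struct example_dev *dev)
{
        /* Mark activity first so an already-running handler sees it. */
        atomic_inc(&dev->total_submission_cnt);
        /* After this returns, no global handler is running or queued,
         * so it is safe to enable the workload profile. */
        cancel_delayed_work_sync(&dev->global_work);
}

static void example_end_use(struct example_dev *dev)
{
        atomic_dec(&dev->total_submission_cnt);
        /* Re-arm the idle check; the handler only drops the profile when
         * the count is zero and no fences are outstanding. */
        schedule_delayed_work(&dev->global_work, EXAMPLE_IDLE_TIMEOUT);
}
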
From c67e8ec11ed1a80bdd48889edf2f0542ce7c39a5 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deuc...@amd.com>
Date: Wed, 13 Aug 2025 16:03:20 -0400
Subject: [PATCH 2/2] drm/amdgpu/vcn2.5: switch to the global work handler

All of the idle work on vcn2.5 is global, so use the new global
work handler rather than abusing a single instance of the per
instance work handler.

Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 28 ++++++++++++----------------
 1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 3a7c137a83efb..75158f41b8461 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -109,11 +109,10 @@ static int amdgpu_ih_clientid_vcns[] = {
 	SOC15_IH_CLIENTID_VCN1
 };
 
-static void vcn_v2_5_idle_work_handler(struct work_struct *work)
+static void vcn_v2_5_global_work_handler(struct work_struct *work)
 {
-	struct amdgpu_vcn_inst *vcn_inst =
-		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
-	struct amdgpu_device *adev = vcn_inst->adev;
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, vcn.global_work.work);
 	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
 	unsigned int i, j;
 	int r = 0;
@@ -146,7 +145,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
 
 	}
 
-	if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
+	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 						       AMD_PG_STATE_GATE);
 		mutex_lock(&adev->vcn.workload_profile_mutex);
@@ -159,7 +158,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
 		}
 		mutex_unlock(&adev->vcn.workload_profile_mutex);
 	} else {
-		schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->vcn.global_work, VCN_IDLE_TIMEOUT);
 	}
 }
 
@@ -169,9 +168,9 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
 	struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
 	int r = 0;
 
-	atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
+	atomic_inc(&adev->vcn.total_submission_cnt);
 
-	cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 
 	/* We can safely return early here because we've cancelled the
 	 * the delayed work so there is no one else to set it to false
@@ -230,10 +229,9 @@ static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
 	    !adev->vcn.inst[ring->me].using_unified_queue)
 		atomic_dec(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 
-	atomic_dec(&adev->vcn.inst[0].total_submission_cnt);
+	atomic_dec(&adev->vcn.total_submission_cnt);
 
-	schedule_delayed_work(&adev->vcn.inst[0].idle_work,
-			      VCN_IDLE_TIMEOUT);
+	schedule_delayed_work(&adev->vcn.global_work, VCN_IDLE_TIMEOUT);
 }
 
 /**
@@ -299,6 +297,8 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 	int i, j, r;
 	struct amdgpu_device *adev = ip_block->adev;
 
+	INIT_DELAYED_WORK(&adev->vcn.global_work, vcn_v2_5_global_work_handler);
+
 	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
 		volatile struct amdgpu_fw_shared *fw_shared;
 
@@ -328,9 +328,6 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 		if (r)
 			return r;
 
-		/* Override the work func */
-		adev->vcn.inst[j].idle_work.work.func = vcn_v2_5_idle_work_handler;
-
 		amdgpu_vcn_setup_ucode(adev, j);
 
 		r = amdgpu_vcn_resume(adev, j);
@@ -533,14 +530,13 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
+	cancel_delayed_work_sync(&adev->vcn.global_work);
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 
-		cancel_delayed_work_sync(&vinst->idle_work);
-
 		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
 		    (vinst->cur_state != AMD_PG_STATE_GATE &&
 		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
-- 
2.50.1