This patch adds a function which will clear the GPU
power profile after the job has finished.

This is how it works:
- Scheduler will set the GPU power profile based on ring_type.
- Scheduler will clear the GPU power profile once the job is finished.
- Here, the *_workload_profile_set function will set the GPU
  power profile and the *_workload_profile_put function will
  schedule the smu_delayed_work task after a 100ms delay. This
  smu_delayed_work task will clear the GPU power profile if no
  new job is scheduled within 100ms. But if a new job arrives
  within 100ms, the *_workload_profile_set function will cancel
  this work and set the GPU power profile based on preferences.

v2:
- Splitting workload_profile_set and workload_profile_put
  into two separate patches.
- Addressed review comment.

v3:
- Addressed all the review comments.
- Now clearing all the profiles in the work handler.
- Added a *_clear_all function to clear all the power profiles.
- Scheduling delayed work to clear the power profile when the
  refcount becomes zero.

Cc: Shashank Sharma <shashank.sha...@amd.com>
Cc: Christian Koenig <christian.koe...@amd.com>
Cc: Alex Deucher <alexander.deuc...@amd.com>
Signed-off-by: Arvind Yadav <arvind.ya...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c  | 118 +++++++++++++++++-
 drivers/gpu/drm/amd/include/amdgpu_workload.h |   3 +
 2 files changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c
index 67eacaac6c9b..fbe86ee5b8bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c
@@ -24,6 +24,9 @@
 
 #include "amdgpu.h"
 
+/* 100 millisecond timeout */
+#define SMU_IDLE_TIMEOUT       msecs_to_jiffies(100)
+
 static enum PP_SMC_POWER_PROFILE
 ring_to_power_profile(uint32_t ring_type)
 {
@@ -58,16 +61,111 @@ amdgpu_power_profile_set(struct amdgpu_device *adev,
        return ret;
 }
 
+static int
+amdgpu_power_profile_clear(struct amdgpu_device *adev,
+                          enum PP_SMC_POWER_PROFILE profile)
+{
+       int ret = amdgpu_dpm_switch_power_profile(adev, profile, false);
+
+       if (!ret) {
+               /* Clear the bit for the submitted workload profile */
+               clear_bit(profile, &adev->smu_workload.submit_workload_status);
+       }
+
+       return ret;
+}
+
+static void
+amdgpu_power_profile_clear_all(struct amdgpu_device *adev,
+                              struct amdgpu_smu_workload *workload)
+{
+       int ret;
+       int profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+       cancel_delayed_work_sync(&workload->power_profile_work);
+       mutex_lock(&workload->workload_lock);
+
+       /* Clear all the GPU power profiles */
+       for (; profile > 0; profile--) {
+               atomic_set(&workload->power_profile_ref[profile], 0);
+               ret = amdgpu_power_profile_clear(adev, profile);
+               if (ret) {
+                       DRM_WARN("Failed to clear workload %s,error = %d\n",
+                                amdgpu_workload_mode_name[profile], ret);
+               }
+       }
+
+       workload->submit_workload_status = 0;
+       mutex_unlock(&workload->workload_lock);
+}
+
+static void
+amdgpu_power_profile_idle_work_handler(struct work_struct *work)
+{
+
+       struct amdgpu_smu_workload *workload = container_of(work,
+                                                     struct 
amdgpu_smu_workload,
+                                                     power_profile_work.work);
+       struct amdgpu_device *adev = workload->adev;
+       int ret;
+       int profile;
+
+       mutex_lock(&workload->workload_lock);
+
+       /* Clear all the GPU power profiles */
+       for_each_set_bit(profile, &workload->submit_workload_status,
+                        PP_SMC_POWER_PROFILE_CUSTOM) {
+               if (!atomic_read(&workload->power_profile_ref[profile])) {
+                       ret = amdgpu_power_profile_clear(adev, profile);
+                       if (ret) {
+                               DRM_WARN("Failed to clear workload %s,error = 
%d\n",
+                                        amdgpu_workload_mode_name[profile], 
ret);
+                       }
+               }
+       }
+
+       mutex_unlock(&workload->workload_lock);
+}
+
+void amdgpu_workload_profile_put(struct amdgpu_device *adev,
+                                uint32_t ring_type)
+{
+       struct amdgpu_smu_workload *workload = &adev->smu_workload;
+       enum PP_SMC_POWER_PROFILE profile = ring_to_power_profile(ring_type);
+       int refcount;
+
+       if (profile == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT)
+               return;
+
+       mutex_lock(&workload->workload_lock);
+
+       refcount = atomic_read(&workload->power_profile_ref[profile]);
+       if (!refcount) {
+               DRM_WARN("Power profile %s ref. count error\n",
+                        amdgpu_workload_mode_name[profile]);
+       } else {
+               if (refcount == 1)
+                       schedule_delayed_work(&workload->power_profile_work,
+                                             SMU_IDLE_TIMEOUT);
+
+               atomic_dec(&workload->power_profile_ref[profile]);
+       }
+
+       mutex_unlock(&workload->workload_lock);
+}
+
 void amdgpu_workload_profile_get(struct amdgpu_device *adev,
                                 uint32_t ring_type)
 {
        struct amdgpu_smu_workload *workload = &adev->smu_workload;
        enum PP_SMC_POWER_PROFILE profile = ring_to_power_profile(ring_type);
        int ret, refcount;
+       int index;
 
        if (profile == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT)
                return;
 
+       cancel_delayed_work_sync(&workload->power_profile_work);
        mutex_lock(&workload->workload_lock);
 
        refcount = atomic_read(&workload->power_profile_ref[profile]);
@@ -80,6 +178,22 @@ void amdgpu_workload_profile_get(struct amdgpu_device *adev,
        }
 
        atomic_inc(&adev->smu_workload.power_profile_ref[profile]);
+
+       /* As we cancelled the delayed work, check and clear the pending
+        * power profiles set by previous jobs which are now done.
+        */
+       for_each_set_bit(index, &workload->submit_workload_status,
+                        PP_SMC_POWER_PROFILE_CUSTOM) {
+               if (!atomic_read(&workload->power_profile_ref[index]) &&
+                   (index != profile)) {
+                       ret = amdgpu_power_profile_clear(adev, index);
+                       if (ret) {
+                               DRM_WARN("Failed to clear workload %s, err = 
%d\n",
+                                        amdgpu_workload_mode_name[index], 
ret);
+                       }
+               }
+       }
+
        mutex_unlock(&workload->workload_lock);
 }
 
@@ -90,6 +204,8 @@ void amdgpu_workload_profile_init(struct amdgpu_device *adev)
        adev->smu_workload.initialized = true;
 
        mutex_init(&adev->smu_workload.workload_lock);
+       INIT_DELAYED_WORK(&adev->smu_workload.power_profile_work,
+                         amdgpu_power_profile_idle_work_handler);
 }
 
 void amdgpu_workload_profile_fini(struct amdgpu_device *adev)
@@ -97,7 +213,7 @@ void amdgpu_workload_profile_fini(struct amdgpu_device *adev)
        if (!adev->smu_workload.initialized)
                return;
 
-       adev->smu_workload.submit_workload_status = 0;
+       amdgpu_power_profile_clear_all(adev, &adev->smu_workload);
        adev->smu_workload.initialized = false;
        mutex_destroy(&adev->smu_workload.workload_lock);
 }
diff --git a/drivers/gpu/drm/amd/include/amdgpu_workload.h 
b/drivers/gpu/drm/amd/include/amdgpu_workload.h
index 5fc0bc2a74a4..596a962800e9 100644
--- a/drivers/gpu/drm/amd/include/amdgpu_workload.h
+++ b/drivers/gpu/drm/amd/include/amdgpu_workload.h
@@ -46,6 +46,9 @@ static const char * const amdgpu_workload_mode_name[] = {
        "Window3D"
 };
 
+void amdgpu_workload_profile_put(struct amdgpu_device *adev,
+                                uint32_t ring_type);
+
 void amdgpu_workload_profile_get(struct amdgpu_device *adev,
                                 uint32_t ring_type);
 
-- 
2.34.1

Reply via email to