Pace the wbrf event handling via a delayed work item to protect PMFW from being overloaded by a flood of exclusion range updates.
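
The pacing works by debouncing: the wbrf notifier no longer calls smu_wbrf_handle_exclusion_ranges() directly but only schedules a delayed work item, so at most one update is issued per SMU_WBRF_EVENT_HANDLING_PACE window. A minimal self-contained sketch of this pattern follows (illustrative module code, not part of the patch; burst_work, on_event and EVENT_PACE_MS are made-up names, while the workqueue APIs are the real ones used here):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EVENT_PACE_MS	10	/* mirrors SMU_WBRF_EVENT_HANDLING_PACE */

static void burst_work_handler(struct work_struct *work)
{
	/* Runs at most once per armed window, in process context. */
	pr_info("handling coalesced events\n");
}

static DECLARE_DELAYED_WORK(burst_work, burst_work_handler);

/* Called from the (possibly flooding) event source. */
static void on_event(void)
{
	/*
	 * schedule_delayed_work() returns false and does nothing
	 * while the work is still pending, so a burst of events
	 * produces a single handler run roughly EVENT_PACE_MS
	 * after the first event of the burst.
	 */
	schedule_delayed_work(&burst_work,
			      msecs_to_jiffies(EVENT_PACE_MS));
}

static int __init pace_init(void)
{
	on_event();		/* simulate one incoming event */
	return 0;
}

static void __exit pace_exit(void)
{
	/* As in smu_wbrf_fini(): make sure nothing runs after unload. */
	cancel_delayed_work_sync(&burst_work);
}

module_init(pace_init);
module_exit(pace_exit);
MODULE_LICENSE("GPL");

Events arriving while the work is pending are intentionally dropped; no data is lost because the handler always consumes the latest exclusion ranges from the provider.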

Signed-off-by: Evan Quan <evan.q...@amd.com>
Reviewed-by: Mario Limonciello <mario.limoncie...@amd.com>
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 31 +++++++++++++++----
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |  7 +++++
 2 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 83d428e890df..aa7faeafc86b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1278,7 +1278,8 @@ static int smu_wbrf_event_handler(struct notifier_block *nb,
 
        switch (action) {
        case WBRF_CHANGED:
-               smu_wbrf_handle_exclusion_ranges(smu);
+               schedule_delayed_work(&smu->wbrf_delayed_work,
+                                     msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
                break;
        default:
                return NOTIFY_DONE;
@@ -1287,6 +1288,21 @@ static int smu_wbrf_event_handler(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+/**
+ * smu_wbrf_delayed_work_handler - handler invoked when the delayed work timer expires
+ *
+ * @work: struct work_struct pointer
+ *
+ * The event flood is considered over and the driver consumes the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+       struct smu_context *smu =
+               container_of(work, struct smu_context, wbrf_delayed_work.work);
+
+       smu_wbrf_handle_exclusion_ranges(smu);
+}
+
 /**
  * smu_wbrf_support_check - check wbrf support
  *
@@ -1317,12 +1333,14 @@ static void smu_wbrf_support_check(struct smu_context *smu)
  */
 static int smu_wbrf_init(struct smu_context *smu)
 {
-       struct amdgpu_device *adev = smu->adev;
        int ret;
 
        if (!smu->wbrf_supported)
                return 0;
 
+       INIT_DELAYED_WORK(&smu->wbrf_delayed_work,
+                         smu_wbrf_delayed_work_handler);
+
        smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
        ret = wbrf_register_notifier(&smu->wbrf_notifier);
        if (ret)
@@ -1333,11 +1351,10 @@ static int smu_wbrf_init(struct smu_context *smu)
         * before our driver loaded. To make sure our driver
         * is aware of those exclusion ranges.
         */
-       ret = smu_wbrf_handle_exclusion_ranges(smu);
-       if (ret)
-               dev_err(adev->dev, "Failed to handle wbrf exclusion ranges\n");
+       schedule_delayed_work(&smu->wbrf_delayed_work,
+                             msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -1353,6 +1370,8 @@ static void smu_wbrf_fini(struct smu_context *smu)
                return;
 
        wbrf_unregister_notifier(&smu->wbrf_notifier);
+
+       cancel_delayed_work_sync(&smu->wbrf_delayed_work);
 }
 
 static int smu_smc_hw_setup(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 5b2343cfc69b..5df28d4a8c30 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -480,6 +480,12 @@ struct stb_context {
 
 #define WORKLOAD_POLICY_MAX 7
 
+/*
+ * Pace the wbrf event handling: at most one event is processed
+ * every SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE   10
+
 struct smu_context
 {
        struct amdgpu_device            *adev;
@@ -579,6 +585,7 @@ struct smu_context
        /* data structures for wbrf feature support */
        bool                            wbrf_supported;
        struct notifier_block           wbrf_notifier;
+       struct delayed_work             wbrf_delayed_work;
 };
 
 struct i2c_adapter;
-- 
2.34.1
