This patch continues the effort to refactor the workqueue APIs, which began
with the changes that introduced new workqueues and a new alloc_workqueue flag:

   commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
   commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
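
For instance, with the new flag a workqueue that must keep per-CPU
execution can request it explicitly at creation time (an illustrative
sketch, not part of this patch; "my_wq" is a made-up name):

   /* explicitly per-CPU; 0 selects the default max_active */
   wq = alloc_workqueue("my_wq", WQ_PERCPU, 0);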

The point of the refactoring is to eventually make workqueues unbound by
default, so that the scheduler can optimize their workload placement.

Before that can happen, and after a careful review and conversion of each
individual case, workqueue users must be converted to the better-named new
workqueues, with no intended behaviour change:

   system_wq -> system_percpu_wq
   system_unbound_wq -> system_dfl_wq
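
In practice each conversion is a one-line substitution of the workqueue
argument, for instance (an illustrative sketch, not taken from this
patch; my_work is a made-up work item):

   /* before: per-CPU placement via the old default workqueue */
   queue_work(system_wq, &my_work);

   /* after: same per-CPU behaviour, better-named workqueue */
   queue_work(system_percpu_wq, &my_work);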

This way, the obsolete workqueues (system_wq, system_unbound_wq) can be
removed in the future.

Suggested-by: Tejun Heo <[email protected]>
Signed-off-by: Marco Crivellari <[email protected]>
---
 drivers/gpu/drm/xe/xe_tlb_inval.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c
index 918a59e686ea..b2cf6e17fbc5 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval.c
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.c
@@ -94,7 +94,7 @@ static void xe_tlb_inval_fence_timeout(struct work_struct *work)
                xe_tlb_inval_fence_signal(fence);
        }
        if (!list_empty(&tlb_inval->pending_fences))
-               queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+               queue_delayed_work(system_percpu_wq, &tlb_inval->fence_tdr,
                                   timeout_delay);
        spin_unlock_irq(&tlb_inval->pending_lock);
 }
@@ -226,7 +226,7 @@ static void xe_tlb_inval_fence_prep(struct xe_tlb_inval_fence *fence)
        list_add_tail(&fence->link, &tlb_inval->pending_fences);
 
        if (list_is_singular(&tlb_inval->pending_fences))
-               queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+               queue_delayed_work(system_percpu_wq, &tlb_inval->fence_tdr,
                                   tlb_inval->ops->timeout_delay(tlb_inval));
        spin_unlock_irq(&tlb_inval->pending_lock);
 
@@ -378,7 +378,7 @@ void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno)
        }
 
        if (!list_empty(&tlb_inval->pending_fences))
-               mod_delayed_work(system_wq,
+               mod_delayed_work(system_percpu_wq,
                                 &tlb_inval->fence_tdr,
                                 tlb_inval->ops->timeout_delay(tlb_inval));
        else
-- 
2.52.0
