Currently, if a user enqueues a work item using schedule_delayed_work(), the
wq used is "system_wq" (a per-CPU wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to
schedule_work(), which uses system_wq, while queue_work() again makes use
of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.
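
For illustration, the current helpers look roughly like this (paraphrased
sketch of include/linux/workqueue.h, not part of this patch; the delayed
variants follow the same pattern):

  /* Paraphrased sketch, not part of this patch. */
  static inline bool queue_work(struct workqueue_struct *wq,
                                struct work_struct *work)
  {
          /* No CPU is requested; the wq itself decides placement. */
          return queue_work_on(WORK_CPU_UNBOUND, wq, work);
  }

  static inline bool schedule_work(struct work_struct *work)
  {
          /* Hard-codes the per-CPU system_wq. */
          return queue_work(system_wq, work);
  }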

The change above to the Workqueue API was introduced by:

commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")

The unbound workqueue (now system_dfl_wq) should be the default, so that
locality constraints are not enforced on random work when they are not
required.
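
For callers, the intended split is roughly as follows (illustrative sketch
only; "my_work" is a placeholder, and the wq names are those added by the
commit referenced above):

  /* Random work with no locality requirement uses the unbound default: */
  queue_work(system_dfl_wq, &my_work);

  /* Work that genuinely needs per-CPU execution stays on the per-CPU wq: */
  queue_work(system_percpu_wq, &my_work);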

The old system_unbound_wq will be kept around for a few release cycles.

This patch converts the xe driver's users of system_unbound_wq to
system_dfl_wq.

Suggested-by: Tejun Heo <[email protected]>
Signed-off-by: Marco Crivellari <[email protected]>
---
 drivers/gpu/drm/xe/xe_devcoredump.c | 2 +-
 drivers/gpu/drm/xe/xe_execlist.c    | 2 +-
 drivers/gpu/drm/xe/xe_guc_ct.c      | 4 ++--
 drivers/gpu/drm/xe/xe_oa.c          | 2 +-
 drivers/gpu/drm/xe/xe_vm.c          | 4 ++--
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 203e3038cc81..806335487021 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -362,7 +362,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
 
        xe_engine_snapshot_capture_for_queue(q);
 
-       queue_work(system_unbound_wq, &ss->work);
+       queue_work(system_dfl_wq, &ss->work);
 
        xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
        dma_fence_end_signalling(cookie);
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index f83d421ac9d3..99010709f0d2 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -422,7 +422,7 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
 static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
 {
        INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
-       queue_work(system_unbound_wq, &q->execlist->destroy_async);
+       queue_work(system_dfl_wq, &q->execlist->destroy_async);
 }
 
 static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 18f6327bf552..bc2ec3603e7b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -543,7 +543,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
        spin_lock_irq(&ct->dead.lock);
        if (ct->dead.reason) {
                ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
-               queue_work(system_unbound_wq, &ct->dead.worker);
+               queue_work(system_dfl_wq, &ct->dead.worker);
        }
        spin_unlock_irq(&ct->dead.lock);
 #endif
@@ -2186,7 +2186,7 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
 
        spin_unlock_irqrestore(&ct->dead.lock, flags);
 
-       queue_work(system_unbound_wq, &(ct)->dead.worker);
+       queue_work(system_dfl_wq, &(ct)->dead.worker);
 }
 
 static void ct_dead_print(struct xe_dead_ct *dead)
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index a4894eb0d7f3..4e362cd43d51 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -967,7 +967,7 @@ static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
        struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
 
        INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
-       queue_delayed_work(system_unbound_wq, &ofence->work,
+       queue_delayed_work(system_dfl_wq, &ofence->work,
                           usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
        dma_fence_put(fence);
 }
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 63c65e3d207b..d3a0e0231cd1 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1064,7 +1064,7 @@ static void vma_destroy_cb(struct dma_fence *fence,
        struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
 
        INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
-       queue_work(system_unbound_wq, &vma->destroy_work);
+       queue_work(system_dfl_wq, &vma->destroy_work);
 }
 
 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
@@ -1823,7 +1823,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
        struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
 
        /* To destroy the VM we need to be able to sleep */
-       queue_work(system_unbound_wq, &vm->destroy_work);
+       queue_work(system_dfl_wq, &vm->destroy_work);
 }
 
 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
-- 
2.51.1