On Sat, Jan 24, 2026 at 03:54:00PM +0100, Marco Crivellari wrote:
> This patch continues the effort to refactor workqueue APIs, which began
> with the changes introducing new workqueues and a new alloc_workqueue flag:
> 
>    commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
>    commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
> 
> The point of the refactoring is to eventually make workqueues unbound by
> default, so that their workload placement can be optimized by the
> scheduler.
> 
> Before that can happen, workqueue users must be converted to the
> better-named new workqueues, with no intended behavior changes:
> 
>    system_wq -> system_percpu_wq
>    system_unbound_wq -> system_dfl_wq
> 
> This way the old obsolete workqueues (system_wq, system_unbound_wq) can be
> removed in the future.
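
For anyone tracking the series, the conversion is a mechanical rename with
identical semantics. Below is a minimal sketch of the old vs. new usage;
example_fn, example_work and example_enqueue are hypothetical names, not
taken from this patch:

    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *work)
    {
            /* deferred processing runs here, in process context */
    }

    static DECLARE_WORK(example_work, example_fn);

    static void example_enqueue(void)
    {
            /*
             * Obsolete spelling (to be removed eventually):
             *     queue_work(system_unbound_wq, &example_work);
             * and, for per-CPU users, system_wq becomes system_percpu_wq.
             *
             * New spelling, no intended behavior change:
             */
            queue_work(system_dfl_wq, &example_work);
    }

Drivers that allocate their own per-CPU workqueue can likewise make the
placement explicit with the new flag, e.g. alloc_workqueue("example",
WQ_PERCPU, 0).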

Thanks for improving the message.

Could you please rebase on top of the latest drm-tip? It is not applying as is.

Thanks,
Rodrigo.

> 
> Link: https://lore.kernel.org/all/[email protected]/
> Suggested-by: Tejun Heo <[email protected]>
> Signed-off-by: Marco Crivellari <[email protected]>

> ---
>  drivers/gpu/drm/xe/xe_devcoredump.c | 2 +-
>  drivers/gpu/drm/xe/xe_execlist.c    | 2 +-
>  drivers/gpu/drm/xe/xe_guc_ct.c      | 4 ++--
>  drivers/gpu/drm/xe/xe_oa.c          | 2 +-
>  drivers/gpu/drm/xe/xe_vm.c          | 4 ++--
>  5 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
> index d444eda65ca6..6b47aaf8cc9f 100644
> --- a/drivers/gpu/drm/xe/xe_devcoredump.c
> +++ b/drivers/gpu/drm/xe/xe_devcoredump.c
> @@ -362,7 +362,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
>  
>       xe_engine_snapshot_capture_for_queue(q);
>  
> -     queue_work(system_unbound_wq, &ss->work);
> +     queue_work(system_dfl_wq, &ss->work);
>  
>       xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
>       dma_fence_end_signalling(cookie);
> diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> index 769d05517f93..730b600a5803 100644
> --- a/drivers/gpu/drm/xe/xe_execlist.c
> +++ b/drivers/gpu/drm/xe/xe_execlist.c
> @@ -422,7 +422,7 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
>  static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
>  {
>       INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
> -     queue_work(system_unbound_wq, &q->execlist->destroy_async);
> +     queue_work(system_dfl_wq, &q->execlist->destroy_async);
>  }
>  
>  static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> index a5019d1e741b..351c9986f6cf 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> @@ -558,7 +558,7 @@ static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
>       spin_lock_irq(&ct->dead.lock);
>       if (ct->dead.reason) {
>               ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
> -             queue_work(system_unbound_wq, &ct->dead.worker);
> +             queue_work(system_dfl_wq, &ct->dead.worker);
>       }
>       spin_unlock_irq(&ct->dead.lock);
>  #endif
> @@ -2093,7 +2093,7 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
>  
>       spin_unlock_irqrestore(&ct->dead.lock, flags);
>  
> -     queue_work(system_unbound_wq, &(ct)->dead.worker);
> +     queue_work(system_dfl_wq, &(ct)->dead.worker);
>  }
>  
>  static void ct_dead_print(struct xe_dead_ct *dead)
> diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
> index f8bb28ab8124..c8e65e38081c 100644
> --- a/drivers/gpu/drm/xe/xe_oa.c
> +++ b/drivers/gpu/drm/xe/xe_oa.c
> @@ -969,7 +969,7 @@ static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
>       struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
>  
>       INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
> -     queue_delayed_work(system_unbound_wq, &ofence->work,
> +     queue_delayed_work(system_dfl_wq, &ofence->work,
>                          usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
>       dma_fence_put(fence);
>  }
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 095bb197e8b0..ddf0a9567614 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1091,7 +1091,7 @@ static void vma_destroy_cb(struct dma_fence *fence,
>       struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
>  
>       INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
> -     queue_work(system_unbound_wq, &vma->destroy_work);
> +     queue_work(system_dfl_wq, &vma->destroy_work);
>  }
>  
>  static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
> @@ -1854,7 +1854,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
>       struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
>  
>       /* To destroy the VM we need to be able to sleep */
> -     queue_work(system_unbound_wq, &vm->destroy_work);
> +     queue_work(system_dfl_wq, &vm->destroy_work);
>  }
>  
>  struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
> -- 
> 2.52.0
> 
