On 1/29/26 2:15 AM, Chen Ridong wrote:

On 2026/1/29 12:03, Chen Ridong wrote:

On 2026/1/28 12:42, Waiman Long wrote:
The update_isolation_cpumasks() function can be called either directly
from a regular cpuset control file write with cpuset_full_lock() held,
or via the CPU hotplug path with cpus_write_lock and cpuset_mutex held.

As we are going to enable dynamic update of the nohz_full housekeeping
cpumask (HK_TYPE_KERNEL_NOISE) soon with the help of CPU hotplug,
allowing the CPU hotplug path to call into housekeeping_update()
directly from update_isolation_cpumasks() will cause a deadlock. So we
have to defer any call to housekeeping_update() until after the CPU
hotplug operation has finished. This can be done via the task_work_add(...,
TWA_RESUME) API, where the actual housekeeping_update() call, if needed,
will happen right before exiting back to userspace.

Since the HK_TYPE_DOMAIN housekeeping cpumask should now track the
changes in "cpuset.cpus.isolated", add a check in test_cpuset_prs.sh to
confirm that the CPU hotplug deferral, if needed, is working as expected.

Signed-off-by: Waiman Long <[email protected]>
---
  kernel/cgroup/cpuset.c                        | 49 ++++++++++++++++++-
  .../selftests/cgroup/test_cpuset_prs.sh       |  9 ++++
  2 files changed, 56 insertions(+), 2 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 7b7d12ab1006..98c7cb732206 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -84,6 +84,10 @@ static cpumask_var_t isolated_cpus;
   */
  static bool isolated_cpus_updating;
+/* Both cpuset_mutex and cpus_read_lock acquired */
+static bool cpuset_full_locked;
+static bool isolation_task_work_queued;
+
  /*
   * A flag to force sched domain rebuild at the end of an operation.
   * It can be set in
@@ -285,10 +289,12 @@ void cpuset_full_lock(void)
  {
        cpus_read_lock();
        mutex_lock(&cpuset_mutex);
+       cpuset_full_locked = true;
  }
 
  void cpuset_full_unlock(void)
  {
+       cpuset_full_locked = false;
        mutex_unlock(&cpuset_mutex);
        cpus_read_unlock();
  }
@@ -1285,25 +1291,64 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
        return false;
  }
+static void __update_isolation_cpumasks(bool twork);
+static void isolation_task_work_fn(struct callback_head *cb)
+{
+       cpuset_full_lock();
+       __update_isolation_cpumasks(true);
+       cpuset_full_unlock();
+}
+
  /*
- * update_isolation_cpumasks - Update external isolation related CPU masks
+ * __update_isolation_cpumasks - Update external isolation related CPU masks
+ * @twork - set if called from isolation_task_work_fn()
   *
   * The following external CPU masks will be updated if necessary:
   * - workqueue unbound cpumask
   */
-static void update_isolation_cpumasks(void)
+static void __update_isolation_cpumasks(bool twork)
  {
        int ret;
+       if (twork)
+               isolation_task_work_queued = false;
+
        if (!isolated_cpus_updating)
                return;
+       /*
+        * This function can be reached either directly from regular cpuset
+        * control file write (cpuset_full_locked) or via hotplug
+        * (cpus_write_lock && cpuset_mutex held). In the latter case, we
+        * defer the housekeeping_update() call to a task_work to avoid
+        * the possibility of deadlock. The task_work will be run right
+        * before exiting back to userspace.
+        */
+       if (!cpuset_full_locked) {
+               static struct callback_head twork_cb;
+
+               if (!isolation_task_work_queued) {
+                       init_task_work(&twork_cb, isolation_task_work_fn);
+                       if (!task_work_add(current, &twork_cb, TWA_RESUME))
+                               isolation_task_work_queued = true;
+                       else
+                               /* Current task shouldn't be exiting */
+                               WARN_ON_ONCE(1);
+               }
+               return;
+       }
+
        ret = housekeeping_update(isolated_cpus);
        WARN_ON_ONCE(ret < 0);
 
        isolated_cpus_updating = false;
  }

The logic is not straightforward; perhaps we can simplify it as follows.
Maybe I missed something, so just correct me if I did.

static void isolation_task_work_fn(struct callback_head *cb)
{
        guard(mutex)(&isolcpus_update_mutex);
        WARN_ON_ONCE(housekeeping_update(isolated_cpus) < 0);
}

/*
  * __update_isolation_cpumasks - Update external isolation related CPU masks
  * @twork - set to defer the update to a task_work
  *
  * The following external CPU masks will be updated if necessary:
  * - workqueue unbound cpumask
  */
static void __update_isolation_cpumasks(bool twork)
{
        if (!isolated_cpus_updating)
                return;

        /*
         * This function can be reached either directly from a regular
         * cpuset control file write or via hotplug (cpus_write_lock &&
         * cpuset_mutex held). In the latter case, we
         * defer the housekeeping_update() call to a task_work to avoid
         * the possibility of deadlock. The task_work will be run right
         * before exiting back to userspace.
         */
        if (twork) {
                static struct callback_head twork_cb;

                init_task_work(&twork_cb, isolation_task_work_fn);
                if (task_work_add(current, &twork_cb, TWA_RESUME))
                        /* Current task shouldn't be exiting */
                        WARN_ON_ONCE(1);

                return;
        }

        lockdep_assert_held(&isolcpus_update_mutex);
        /*
         * Release cpus_read_lock & cpuset_mutex before calling
         * housekeeping_update() and re-acquire them afterward, since we
         * are not calling from the task_work here.
         */

        cpuset_full_unlock();
        WARN_ON_ONCE(housekeeping_update(isolated_cpus) < 0);
        cpuset_full_lock();

        isolated_cpus_updating = false;
}

static inline void update_isolation_cpumasks(void)
{
        __update_isolation_cpumasks(false);
}
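
For reference, this assumes isolcpus_update_mutex is introduced elsewhere
in the series; its declaration is not shown in the patch above. A minimal
sketch of the assumed definition:

static DEFINE_MUTEX(isolcpus_update_mutex);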

It can be much clearer:

static void isolation_task_work_fn(struct callback_head *cb)
{
        guard(mutex)(&isolcpus_update_mutex);
        WARN_ON_ONCE(housekeeping_update(isolated_cpus) < 0);
}

/*
  * __update_isolation_cpumasks - Update external isolation related CPU masks
  * @defer - set to defer the housekeeping update to a task_work
  *
  * The following external CPU masks will be updated if necessary:
  * - workqueue unbound cpumask
  */
static void __update_isolation_cpumasks(bool defer)
{
        if (!isolated_cpus_updating)
                return;

        /*
         * This function can be reached either directly from a regular
         * cpuset control file write or via hotplug (cpus_write_lock &&
         * cpuset_mutex held). In the latter case, we
         * defer the housekeeping_update() call to a task_work to avoid
         * the possibility of deadlock. The task_work will be run right
         * before exiting back to userspace.
         */
        if (defer) {
                static struct callback_head twork_cb;

                init_task_work(&twork_cb, isolation_task_work_fn);
                if (task_work_add(current, &twork_cb, TWA_RESUME))
                        /* Current task shouldn't be exiting */
                        WARN_ON_ONCE(1);

                return;
        }

        lockdep_assert_held(&isolcpus_update_mutex);
        lockdep_assert_cpus_held();
        lockdep_assert_cpuset_lock_held();

        /*
         * Release cpus_read_lock & cpuset_mutex before calling
         * housekeeping_update() and re-acquire them afterward, since we
         * are not calling from the task_work here.
         */

        cpuset_full_unlock();
        WARN_ON_ONCE(housekeeping_update(isolated_cpus) < 0);
        cpuset_full_lock();

        isolated_cpus_updating = false;
}

static inline void update_isolation_cpumasks(void)
{
        __update_isolation_cpumasks(false);
}

static inline void async_update_isolation_cpumasks(void)
{
        __update_isolation_cpumasks(true);
}

The hotplug path just calls async_update_isolation_cpumasks(); cpuset_full_locked
and isolation_task_work_queued can then be removed.
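
As a sketch of the resulting call sites (the callers shown here are
illustrative, not taken from the patch):

        /* regular cpuset control file write, cpuset_full_lock() already held */
        update_isolation_cpumasks();

        /* CPU hotplug path, cpus_write_lock + cpuset_mutex held */
        async_update_isolation_cpumasks();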

Thanks for the suggestions. I will adopt some of them. BTW, I am switching to using a workqueue instead of the task_work as the latter is not suitable in this use case.
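
A minimal sketch of what the workqueue-based deferral could look like (the
names isolation_update_work_fn and isolation_update_work are hypothetical,
not from an actual follow-up patch):

static void isolation_update_work_fn(struct work_struct *work)
{
	/* Safe here: the work item runs after cpus_write_lock is dropped */
	cpuset_full_lock();
	if (isolated_cpus_updating) {
		WARN_ON_ONCE(housekeeping_update(isolated_cpus) < 0);
		isolated_cpus_updating = false;
	}
	cpuset_full_unlock();
}
static DECLARE_WORK(isolation_update_work, isolation_update_work_fn);

The hotplug path would then just do schedule_work(&isolation_update_work)
instead of task_work_add(); the work item runs in process context after the
hotplug operation has finished, so taking cpus_read_lock there cannot
deadlock against cpus_write_lock.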

Cheers,
Longman

