The update_isolation_cpumasks() function can be called either directly from a regular cpuset control file write, with cpuset_full_lock() held, or via the CPU hotplug path with cpus_write_lock and cpuset_mutex held.
As we are going to enable dynamic update of the nohz_full housekeeping
cpumask (HK_TYPE_KERNEL_NOISE) soon with the help of CPU hotplug, allowing
the CPU hotplug path to call into housekeeping_update() directly from
update_isolation_cpumasks() will likely cause deadlock. So we have to
defer any call to housekeeping_update() until after the CPU hotplug
operation has finished. This is now done via a workqueue, where the actual
housekeeping_update() call, if needed, will happen after cpus_write_lock
is released. We can't use the synchronous task_work API because calls from
the CPU hotplug path happen in the per-cpu kthread of the CPU that is
being shut down or brought up. Because of the asynchronous nature of the
workqueue, the HK_TYPE_DOMAIN housekeeping cpumask will be updated a bit
later than the "cpuset.cpus.isolated" control file in this case.

Also add a check in test_cpuset_prs.sh and modify some existing test cases
to confirm that "cpuset.cpus.isolated" and the HK_TYPE_DOMAIN housekeeping
cpumask are both updated.

Signed-off-by: Waiman Long <[email protected]>
---
 kernel/cgroup/cpuset.c                        | 37 +++++++++++++++++--
 .../selftests/cgroup/test_cpuset_prs.sh      | 13 +++++--
 2 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 7b7d12ab1006..0b0eb1df09d5 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -84,6 +84,9 @@ static cpumask_var_t isolated_cpus;
  */
 static bool isolated_cpus_updating;
 
+/* Both cpuset_mutex and cpus_read_lock acquired */
+static bool cpuset_locked;
+
 /*
  * A flag to force sched domain rebuild at the end of an operation.
  * It can be set in
@@ -285,10 +288,12 @@ void cpuset_full_lock(void)
 {
 	cpus_read_lock();
 	mutex_lock(&cpuset_mutex);
+	cpuset_locked = true;
 }
 
 void cpuset_full_unlock(void)
 {
+	cpuset_locked = false;
 	mutex_unlock(&cpuset_mutex);
 	cpus_read_unlock();
 }
@@ -1285,6 +1290,16 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
 	return false;
 }
 
+static void isolcpus_workfn(struct work_struct *work)
+{
+	cpuset_full_lock();
+	if (isolated_cpus_updating) {
+		WARN_ON_ONCE(housekeeping_update(isolated_cpus) < 0);
+		isolated_cpus_updating = false;
+	}
+	cpuset_full_unlock();
+}
+
 /*
  * update_isolation_cpumasks - Update external isolation related CPU masks
  *
@@ -1293,14 +1308,30 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
  */
 static void update_isolation_cpumasks(void)
 {
-	int ret;
+	static DECLARE_WORK(isolcpus_work, isolcpus_workfn);
 
 	if (!isolated_cpus_updating)
 		return;
 
-	ret = housekeeping_update(isolated_cpus);
-	WARN_ON_ONCE(ret < 0);
+	/*
+	 * This function can be reached either directly from regular cpuset
+	 * control file write (cpuset_locked) or via hotplug (cpus_write_lock
+	 * && cpuset_mutex held). In the latter case, we defer the
+	 * housekeeping_update() call to the system_unbound_wq to avoid the
+	 * possibility of deadlock. This also means that there will be a short
+	 * period of time where HK_TYPE_DOMAIN housekeeping cpumask will lag
+	 * behind isolated_cpus.
+	 */
+	if (!cpuset_locked) {
+		/*
+		 * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work
+		 * item that is still pending.
+		 */
+		queue_work(system_unbound_wq, &isolcpus_work);
+		return;
+	}
+	WARN_ON_ONCE(housekeeping_update(isolated_cpus) < 0);
 	isolated_cpus_updating = false;
 }
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index 5dff3ad53867..0502b156582b 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -245,8 +245,9 @@ TEST_MATRIX=(
 	"C2-3:P1:S+ C3:P2 . . O2=0 O2=1 . . 0 A1:2|A2:3 A1:P1|A2:P2"
 	"C2-3:P1:S+ C3:P1 . . O2=0 . . . 0 A1:|A2:3 A1:P1|A2:P1"
 	"C2-3:P1:S+ C3:P1 . . O3=0 . . . 0 A1:2|A2: A1:P1|A2:P1"
-	"C2-3:P1:S+ C3:P1 . . T:O2=0 . . . 0 A1:3|A2:3 A1:P1|A2:P-1"
-	"C2-3:P1:S+ C3:P1 . . . T:O3=0 . . 0 A1:2|A2:2 A1:P1|A2:P-1"
+	"C2-3:P1:S+ C3:P2 . . T:O2=0 . . . 0 A1:3|A2:3 A1:P1|A2:P-2"
+	"C1-3:P1:S+ C3:P2 . . . T:O3=0 . . 0 A1:1-2|A2:1-2 A1:P1|A2:P-2 3|"
+	"C1-3:P1:S+ C3:P2 . . . T:O3=0 O3=1 . 0 A1:1-2|A2:3 A1:P1|A2:P2 3"
 	"$SETUP_A123_PARTITIONS . O1=0 . . . 0 A1:|A2:2|A3:3 A1:P1|A2:P1|A3:P1"
 	"$SETUP_A123_PARTITIONS . O2=0 . . . 0 A1:1|A2:|A3:3 A1:P1|A2:P1|A3:P1"
 	"$SETUP_A123_PARTITIONS . O3=0 . . . 0 A1:1|A2:2|A3: A1:P1|A2:P1|A3:P1"
@@ -764,7 +765,7 @@ check_cgroup_states()
 # only CPUs in isolated partitions as well as those that are isolated at
 # boot time.
 #
-# $1 - expected isolated cpu list(s) <isolcpus1>{,<isolcpus2>}
+# $1 - expected isolated cpu list(s) <isolcpus1>{|<isolcpus2>}
 # <isolcpus1> - expected sched/domains value
 # <isolcpus2> - cpuset.cpus.isolated value = <isolcpus1> if not defined
 #
@@ -773,6 +774,7 @@ check_isolcpus()
 	EXPECTED_ISOLCPUS=$1
 	ISCPUS=${CGROUP2}/cpuset.cpus.isolated
 	ISOLCPUS=$(cat $ISCPUS)
+	HKICPUS=$(cat /sys/devices/system/cpu/isolated)
 	LASTISOLCPU=
 	SCHED_DOMAINS=/sys/kernel/debug/sched/domains
 	if [[ $EXPECTED_ISOLCPUS = . ]]
@@ -810,6 +812,11 @@ check_isolcpus()
 	ISOLCPUS=
 	EXPECTED_ISOLCPUS=$EXPECTED_SDOMAIN
 
+	#
+	# The inverse of the HK_TYPE_DOMAIN cpumask in $HKICPUS should match $ISOLCPUS
+	#
+	[[ "$ISOLCPUS" != "$HKICPUS" ]] && return 1
+
 	#
 	# Use the sched domain in debugfs to check isolated CPUs, if available
 	#
-- 
2.52.0
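
The deferral above relies on two workqueue properties: queue_work()
refuses to requeue a work item whose WORK_STRUCT_PENDING_BIT is still
set, and work queued on system_unbound_wq runs in process context where
cpuset_full_lock() may sleep. The following minimal, hypothetical test
module sketches the same pattern under those assumptions; all demo_*
names are illustrative and are not part of this patch:

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(demo_mutex);	/* stands in for cpuset_mutex */
static bool demo_updating;		/* stands in for isolated_cpus_updating */

static void demo_workfn(struct work_struct *work)
{
	/* Runs in process context, so taking a sleeping lock is fine. */
	mutex_lock(&demo_mutex);
	if (demo_updating) {
		pr_info("demo: deferred update performed\n");
		demo_updating = false;
	}
	mutex_unlock(&demo_mutex);
}

static DECLARE_WORK(demo_work, demo_workfn);

/* Called from a context that must not take demo_mutex directly. */
static void demo_request_update(void)
{
	/*
	 * queue_work() tests WORK_STRUCT_PENDING_BIT internally, so a work
	 * item that is already pending is not queued a second time.
	 */
	queue_work(system_unbound_wq, &demo_work);
}

static int __init demo_init(void)
{
	mutex_lock(&demo_mutex);
	demo_updating = true;
	mutex_unlock(&demo_mutex);
	demo_request_update();
	return 0;
}

static void __exit demo_exit(void)
{
	/* Make sure the deferred work has run before unloading. */
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Sketch of a deferred cpumask-style update via workqueue");
MODULE_LICENSE("GPL");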
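
Likewise, the consistency the new selftest asserts can be spot-checked
from userspace by comparing the two files directly. A rough sketch in C,
assuming cgroup v2 is mounted at /sys/fs/cgroup and accepting that a
transient mismatch right after a hotplug event is expected given the
asynchronous update:

#include <stdio.h>
#include <string.h>

/* Read the first line of a sysfs/cgroupfs file, stripping the newline. */
static int read_line(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	char cgval[256], hkval[256];

	/*
	 * cpuset.cpus.isolated and /sys/devices/system/cpu/isolated (the
	 * inverse of the HK_TYPE_DOMAIN housekeeping cpumask) should agree
	 * once any deferred housekeeping_update() work has run.
	 */
	if (read_line("/sys/fs/cgroup/cpuset.cpus.isolated", cgval, sizeof(cgval)) ||
	    read_line("/sys/devices/system/cpu/isolated", hkval, sizeof(hkval)))
		return 2;
	printf("cpuset: '%s'  housekeeping: '%s'  %s\n", cgval, hkval,
	       strcmp(cgval, hkval) ? "MISMATCH" : "match");
	return strcmp(cgval, hkval) ? 1 : 0;
}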

