On 7/2/25 18:33, Daniel Wagner wrote:
group_mask_cpus_evenly() allows the caller to pass in a CPU mask that
should be evenly distributed. This new function is a more generic
version of the existing group_cpus_evenly(), which always distributes
all present CPUs into groups.

Signed-off-by: Daniel Wagner <w...@kernel.org>
---
  include/linux/group_cpus.h |  3 +++
  lib/group_cpus.c           | 64 +++++++++++++++++++++++++++++++++++++++++++++-
  2 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
index 9d4e5ab6c314b31c09fda82c3f6ac18f77e9de36..d4604dce1316a08400e982039006331f34c18ee8 100644
--- a/include/linux/group_cpus.h
+++ b/include/linux/group_cpus.h
@@ -10,5 +10,8 @@
  #include <linux/cpu.h>
struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks);
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+                                      const struct cpumask *cpu_mask,
+                                      unsigned int *nummasks);
#endif
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index 6d08ac05f371bf880571507d935d9eb501616a84..00c9b7a10c8acd29239fe20d2a30fdae22ef74a5 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -8,6 +8,7 @@
  #include <linux/cpu.h>
  #include <linux/sort.h>
  #include <linux/group_cpus.h>
+#include <linux/sched/isolation.h>
  #ifdef CONFIG_SMP
@@ -425,6 +426,59 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
        *nummasks = min(nr_present + nr_others, numgrps);
        return masks;
  }
+EXPORT_SYMBOL_GPL(group_cpus_evenly);
+
+/**
+ * group_mask_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ * @cpu_mask: CPUs to consider for the grouping
+ * @nummasks: number of initialized cpumasks
+ *
+ * Return: cpumask array if successful, NULL otherwise. And each element
+ * includes CPUs assigned to this group.
+ *
+ * Try to put close CPUs from viewpoint of CPU and NUMA locality into
+ * same group. Allocate present CPUs on these groups evenly.
+ */

Description could be improved. Point is that you do not do any
calculation here, you just call __group_cpus_evenly() with
a different mask.

+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+                                      const struct cpumask *cpu_mask,
+                                      unsigned int *nummasks)
+{
+       cpumask_var_t *node_to_cpumask;
+       cpumask_var_t nmsk;
+       int ret = -ENOMEM;
+       struct cpumask *masks = NULL;
+
+       if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+               return NULL;
+
+       node_to_cpumask = alloc_node_to_cpumask();
+       if (!node_to_cpumask)
+               goto fail_nmsk;
+
+       masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+       if (!masks)
+               goto fail_node_to_cpumask;
+
+       build_node_to_cpumask(node_to_cpumask);
+
+       ret = __group_cpus_evenly(0, numgrps, node_to_cpumask, cpu_mask, nmsk,
+                                 masks);
+
+fail_node_to_cpumask:
+       free_node_to_cpumask(node_to_cpumask);
+
+fail_nmsk:
+       free_cpumask_var(nmsk);
+       if (ret < 0) {
+               kfree(masks);
+               return NULL;
+       }
+       *nummasks = ret;
+       return masks;
+}
+EXPORT_SYMBOL_GPL(group_mask_cpus_evenly);
+
  #else /* CONFIG_SMP */
  struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
  {
@@ -442,5 +496,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
        *nummasks = 1;
        return masks;
  }
-#endif /* CONFIG_SMP */
  EXPORT_SYMBOL_GPL(group_cpus_evenly);
+
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+                                      const struct cpumask *cpu_mask,
+                                      unsigned int *nummasks)
+{
+       return group_cpus_evenly(numgrps, nummasks);
+}
+EXPORT_SYMBOL_GPL(group_mask_cpus_evenly);
+#endif /* CONFIG_SMP */


Otherwise:
Reviewed-by: Hannes Reinecke <h...@suse.de>

Cheers,

Hannes
--
Dr. Hannes Reinecke                  Kernel Storage Architect
h...@suse.de                                +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich

Reply via email to