From: Vineeth Pillai <virem...@linux.microsoft.com>

Hotplug fixes to core-scheduling require a new cpumask iterator that iterates
over every cpu set in either of two given cpumasks (their union). This patch
introduces it.
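As a rough illustration (the mask names and start cpu below are placeholders,
not part of this patch), a caller can walk the union of two masks without
building a temporary cpumask:

	int t;

	/* t visits every cpu set in maskA or maskB, each exactly once. */
	for_each_cpu_or(t, maskA, maskB) {
		/* ... */
	}

	/* Same union, but begin the walk at start_cpu and wrap around once. */
	for_each_cpu_wrap_or(t, maskA, maskB, start_cpu) {
		/* ... */
	}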

Signed-off-by: Vineeth Pillai <virem...@linux.microsoft.com>
Signed-off-by: Joel Fernandes (Google) <j...@joelfernandes.org>
---
 include/linux/cpumask.h | 42 ++++++++++++++++++++++++++++++++
 lib/cpumask.c           | 53 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 95 insertions(+)

diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index f0d895d6ac39..03e8c57c6ca6 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -207,6 +207,10 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
 #define for_each_cpu_and(cpu, mask1, mask2)    \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
+#define for_each_cpu_or(cpu, mask1, mask2)     \
+       for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
+#define for_each_cpu_wrap_or(cpu, mask1, mask2, start) \
+       for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2, (void)(start))
 #else
 /**
  * cpumask_first - get the first cpu in a cpumask
@@ -248,6 +252,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 }
 
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_next_or(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
@@ -278,6 +283,8 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
                (cpu) < nr_cpu_ids;)
 
 extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+extern int cpumask_next_wrap_or(int n, const struct cpumask *mask1,
+                                const struct cpumask *mask2, int start, bool wrap);
 
 /**
  * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
@@ -294,6 +301,22 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
             (cpu) < nr_cpumask_bits;                                          \
             (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
 
+/**
+ * for_each_cpu_wrap_or - iterate over every cpu set in either mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in either mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap_or(cpu, mask1, mask2, start)                                  \
+       for ((cpu) = cpumask_next_wrap_or((start)-1, (mask1), (mask2), (start), false);  \
+            (cpu) < nr_cpumask_bits;                                                    \
+            (cpu) = cpumask_next_wrap_or((cpu), (mask1), (mask2), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
@@ -312,6 +335,25 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
        for ((cpu) = -1;                                                \
                (cpu) = cpumask_next_and((cpu), (mask1), (mask2)),      \
                (cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_or - iterate over every cpu set in either mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places.  It is equivalent to:
+ *     struct cpumask tmp;
+ *     cpumask_or(&tmp, &mask1, &mask2);
+ *     for_each_cpu(cpu, &tmp)
+ *             ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_or(cpu, mask1, mask2)                             \
+       for ((cpu) = -1;                                                \
+               (cpu) = cpumask_next_or((cpu), (mask1), (mask2)),       \
+               (cpu) < nr_cpu_ids;)
 #endif /* SMP */
 
 #define CPU_BITS_NONE                                          \
diff --git a/lib/cpumask.c b/lib/cpumask.c
index fb22fb266f93..0a5cdbd4eb6a 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -42,6 +42,25 @@ int cpumask_next_and(int n, const struct cpumask *src1p,
 }
 EXPORT_SYMBOL(cpumask_next_and);
 
+/**
+ * cpumask_next_or - get the next cpu in *src1p | *src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in either.
+ */
+int cpumask_next_or(int n, const struct cpumask *src1p,
+                   const struct cpumask *src2p)
+{
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpumask_check(n);
+       return find_next_or_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+               nr_cpumask_bits, n + 1);
+}
+EXPORT_SYMBOL(cpumask_next_or);
+
 /**
  * cpumask_any_but - return a "random" in a cpumask, but not this one.
  * @mask: the cpumask to search
@@ -94,6 +113,40 @@ int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
 }
 EXPORT_SYMBOL(cpumask_next_wrap);
 
+/**
+ * cpumask_next_wrap_or - helper to implement for_each_cpu_wrap_or
+ * @n: the cpu prior to the place to search
+ * @mask1: first cpumask pointer
+ * @mask2: second cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in either @mask1 or @mask2.
+ */
+int cpumask_next_wrap_or(int n, const struct cpumask *mask1, const struct cpumask *mask2,
+                          int start, bool wrap)
+{
+       int next;
+
+again:
+       next = cpumask_next_or(n, mask1, mask2);
+
+       if (wrap && n < start && next >= start) {
+               return nr_cpumask_bits;
+
+       } else if (next >= nr_cpumask_bits) {
+               wrap = true;
+               n = -1;
+               goto again;
+       }
+
+       return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap_or);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
-- 
2.28.0.220.ged08abb693-goog
