[PATCH v2 03/10] sched/topology: Provide cfs_overload_cpus bitmap

2018-11-05 Thread Steve Sistare
From: Steve Sistare 

Define and initialize a sparse bitmap of overloaded CPUs, per
last-level-cache scheduling domain, for use by the CFS scheduling class.
Save a pointer to cfs_overload_cpus in the rq for efficient access.

Signed-off-by: Steve Sistare 
---
 include/linux/sched/topology.h |  1 +
 kernel/sched/sched.h   |  2 ++
 kernel/sched/topology.c| 21 +++--
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 2634774..8bac15d 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -72,6 +72,7 @@ struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
int has_idle_cores;
+   struct sparsemask *cfs_overload_cpus;
 };
 
 struct sched_domain {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 455fa33..aadfe68 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -81,6 +81,7 @@
 
 struct rq;
 struct cpuidle_state;
+struct sparsemask;
 
 /* task_struct::on_rq states: */
 #define TASK_ON_RQ_QUEUED  1
@@ -805,6 +806,7 @@ struct rq {
struct cfs_rq   cfs;
	struct rt_rq	rt;
	struct dl_rq	dl;
+   struct sparsemask   *cfs_overload_cpus;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this CPU: */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index a2363f6..f18c416 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -3,6 +3,7 @@
  * Scheduler topology setup/handling methods
  */
 #include "sched.h"
+#include <linux/sparsemask.h>
 
 DEFINE_MUTEX(sched_domains_mutex);
 
@@ -440,6 +441,7 @@ static void update_top_cache_domain(int cpu)
 static void
 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
+   struct sparsemask *cfs_overload_cpus;
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
 
@@ -481,6 +483,10 @@ static void update_top_cache_domain(int cpu)
dirty_sched_domain_sysctl(cpu);
destroy_sched_domains(tmp);
 
+   sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
+   cfs_overload_cpus = (sd ? sd->shared->cfs_overload_cpus : NULL);
+   rcu_assign_pointer(rq->cfs_overload_cpus, cfs_overload_cpus);
+
update_top_cache_domain(cpu);
 }
 
@@ -1611,9 +1617,19 @@ static void __sdt_free(const struct cpumask *cpu_map)
}
 }
 
+#define ZALLOC_MASK(maskp, nelems, node) \
+   (!*(maskp) && !zalloc_sparsemask_node(maskp, nelems,  \
+ SPARSEMASK_DENSITY_DEFAULT, \
+ GFP_KERNEL, node))  \
+
 static int sd_llc_alloc(struct sched_domain *sd)
 {
-   /* Allocate sd->shared data here. Empty for now. */
+   struct sched_domain_shared *sds = sd->shared;
+   struct cpumask *span = sched_domain_span(sd);
+   int nid = cpu_to_node(cpumask_first(span));
+
+   if (ZALLOC_MASK(&sds->cfs_overload_cpus, nr_cpu_ids, nid))
+   return 1;
 
return 0;
 }
@@ -1625,7 +1641,8 @@ static void sd_llc_free(struct sched_domain *sd)
if (!sds)
return;
 
-   /* Free data here. Empty for now. */
+   free_sparsemask(sds->cfs_overload_cpus);
+   sds->cfs_overload_cpus = NULL;
 }
 
 static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
-- 
1.8.3.1



[PATCH v2 03/10] sched/topology: Provide cfs_overload_cpus bitmap

2018-11-05 Thread Steve Sistare
From: Steve Sistare 

Define and initialize a sparse bitmap of overloaded CPUs, per
last-level-cache scheduling domain, for use by the CFS scheduling class.
Save a pointer to cfs_overload_cpus in the rq for efficient access.

Signed-off-by: Steve Sistare 
---
 include/linux/sched/topology.h |  1 +
 kernel/sched/sched.h   |  2 ++
 kernel/sched/topology.c| 21 +++--
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 2634774..8bac15d 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -72,6 +72,7 @@ struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
int has_idle_cores;
+   struct sparsemask *cfs_overload_cpus;
 };
 
 struct sched_domain {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 455fa33..aadfe68 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -81,6 +81,7 @@
 
 struct rq;
 struct cpuidle_state;
+struct sparsemask;
 
 /* task_struct::on_rq states: */
 #define TASK_ON_RQ_QUEUED  1
@@ -805,6 +806,7 @@ struct rq {
struct cfs_rq   cfs;
	struct rt_rq	rt;
	struct dl_rq	dl;
+   struct sparsemask   *cfs_overload_cpus;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this CPU: */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index a2363f6..f18c416 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -3,6 +3,7 @@
  * Scheduler topology setup/handling methods
  */
 #include "sched.h"
+#include <linux/sparsemask.h>
 
 DEFINE_MUTEX(sched_domains_mutex);
 
@@ -440,6 +441,7 @@ static void update_top_cache_domain(int cpu)
 static void
 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
+   struct sparsemask *cfs_overload_cpus;
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
 
@@ -481,6 +483,10 @@ static void update_top_cache_domain(int cpu)
dirty_sched_domain_sysctl(cpu);
destroy_sched_domains(tmp);
 
+   sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
+   cfs_overload_cpus = (sd ? sd->shared->cfs_overload_cpus : NULL);
+   rcu_assign_pointer(rq->cfs_overload_cpus, cfs_overload_cpus);
+
update_top_cache_domain(cpu);
 }
 
@@ -1611,9 +1617,19 @@ static void __sdt_free(const struct cpumask *cpu_map)
}
 }
 
+#define ZALLOC_MASK(maskp, nelems, node) \
+   (!*(maskp) && !zalloc_sparsemask_node(maskp, nelems,  \
+ SPARSEMASK_DENSITY_DEFAULT, \
+ GFP_KERNEL, node))  \
+
 static int sd_llc_alloc(struct sched_domain *sd)
 {
-   /* Allocate sd->shared data here. Empty for now. */
+   struct sched_domain_shared *sds = sd->shared;
+   struct cpumask *span = sched_domain_span(sd);
+   int nid = cpu_to_node(cpumask_first(span));
+
+   if (ZALLOC_MASK(&sds->cfs_overload_cpus, nr_cpu_ids, nid))
+   return 1;
 
return 0;
 }
@@ -1625,7 +1641,8 @@ static void sd_llc_free(struct sched_domain *sd)
if (!sds)
return;
 
-   /* Free data here. Empty for now. */
+   free_sparsemask(sds->cfs_overload_cpus);
+   sds->cfs_overload_cpus = NULL;
 }
 
 static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
-- 
1.8.3.1