commit:     9ae3c38079c69dc3335f4e20816987575a5ea5c7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jun 26 21:51:26 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jun 26 21:51:26 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9ae3c380

Updated BMQ Scheduler patch to r2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  2 +-
 ...=> 5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch | 55 +++++++---------------
 2 files changed, 19 insertions(+), 38 deletions(-)

diff --git a/0000_README b/0000_README
index 17ef0755..728697d0 100644
--- a/0000_README
+++ b/0000_README
@@ -111,7 +111,7 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
-Patch:  5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch
+Patch:  5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
 From:   https://gitlab.com/alfredchen/linux-prjc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
 

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch b/5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
similarity index 99%
rename from 5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
index a130157e..cf13d856 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.18-r1.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.18-r2.patch
@@ -632,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..189332cd6f99
+index 000000000000..b8e67d568e17
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7768 @@
+@@ -0,0 +1,7750 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -705,7 +705,7 @@ index 000000000000..189332cd6f99
 +#define sched_feat(x) (0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v5.18-r1"
++#define ALT_SCHED_VERSION "v5.18-r2"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p)            rt_prio((p)->prio)
@@ -785,14 +785,14 @@ index 000000000000..189332cd6f99
 +#ifdef CONFIG_SCHED_SMT
 +static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
 +#endif
-+static cpumask_t sched_rq_watermark[SCHED_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t sched_rq_watermark[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
 +
 +/* sched_queue related functions */
 +static inline void sched_queue_init(struct sched_queue *q)
 +{
 +      int i;
 +
-+      bitmap_zero(q->bitmap, SCHED_BITS);
++      bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
 +      for(i = 0; i < SCHED_BITS; i++)
 +              INIT_LIST_HEAD(&q->heads[i]);
 +}
@@ -824,7 +824,7 @@ index 000000000000..189332cd6f99
 +      cpu = cpu_of(rq);
 +      if (watermark < last_wm) {
 +              for (i = last_wm; i > watermark; i--)
-+                      cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
++                      cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
 +#ifdef CONFIG_SCHED_SMT
 +              if (static_branch_likely(&sched_smt_present) &&
 +                  IDLE_TASK_SCHED_PRIO == last_wm)
@@ -835,7 +835,7 @@ index 000000000000..189332cd6f99
 +      }
 +      /* last_wm < watermark */
 +      for (i = watermark; i > last_wm; i--)
-+              cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_BITS - 1 - i);
++              cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
 +#ifdef CONFIG_SCHED_SMT
 +      if (static_branch_likely(&sched_smt_present) &&
 +          IDLE_TASK_SCHED_PRIO == watermark) {
@@ -2543,7 +2543,7 @@ index 000000000000..189332cd6f99
 +#endif
 +          cpumask_and(&tmp, &chk_mask, sched_rq_watermark) ||
 +          cpumask_and(&tmp, &chk_mask,
-+                      sched_rq_watermark + SCHED_BITS - task_sched_prio(p)))
++                      sched_rq_watermark + SCHED_QUEUE_BITS - 1 - task_sched_prio(p)))
 +              return best_mask_cpu(task_cpu(p), &tmp);
 +
 +      return best_mask_cpu(task_cpu(p), &chk_mask);
@@ -4334,24 +4334,6 @@ index 000000000000..189332cd6f99
 + */
 +void sched_exec(void)
 +{
-+      struct task_struct *p = current;
-+      unsigned long flags;
-+      int dest_cpu;
-+
-+      raw_spin_lock_irqsave(&p->pi_lock, flags);
-+      dest_cpu = cpumask_any(p->cpus_ptr);
-+      if (dest_cpu == smp_processor_id())
-+              goto unlock;
-+
-+      if (likely(cpu_active(dest_cpu))) {
-+              struct migration_arg arg = { p, dest_cpu };
-+
-+              raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+              stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-+              return;
-+      }
-+unlock:
-+      raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 +}
 +
 +#endif
@@ -4519,7 +4501,7 @@ index 000000000000..189332cd6f99
 +}
 +
 +#ifdef CONFIG_SCHED_SMT
-+static inline int active_load_balance_cpu_stop(void *data)
++static inline int sg_balance_cpu_stop(void *data)
 +{
 +      struct rq *rq = this_rq();
 +      struct task_struct *p = data;
@@ -4570,15 +4552,15 @@ index 000000000000..189332cd6f99
 +      raw_spin_unlock_irqrestore(&rq->lock, flags);
 +
 +      if (res)
-+              stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
-+                                  curr, &rq->active_balance_work);
++              stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++                                  &rq->active_balance_work);
 +      return res;
 +}
 +
 +/*
-+ * sg_balance_check - slibing group balance check for run queue @rq
++ * sg_balance - slibing group balance check for run queue @rq
 + */
-+static inline void sg_balance_check(struct rq *rq)
++static inline void sg_balance(struct rq *rq)
 +{
 +      cpumask_t chk;
 +      int cpu = cpu_of(rq);
@@ -5243,7 +5225,7 @@ index 000000000000..189332cd6f99
 +      }
 +
 +#ifdef CONFIG_SCHED_SMT
-+      sg_balance_check(rq);
++      sg_balance(rq);
 +#endif
 +}
 +
@@ -7884,7 +7866,7 @@ index 000000000000..189332cd6f99
 +      wait_bit_init();
 +
 +#ifdef CONFIG_SMP
-+      for (i = 0; i < SCHED_BITS; i++)
++      for (i = 0; i < SCHED_QUEUE_BITS; i++)
 +              cpumask_copy(sched_rq_watermark + i, cpu_present_mask);
 +#endif
 +
@@ -9094,10 +9076,10 @@ index 000000000000..611424bbfa9b
 +#endif /* ALT_SCHED_H */
 diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
 new file mode 100644
-index 000000000000..bf7ac80ec242
+index 000000000000..66b77291b9d0
 --- /dev/null
 +++ b/kernel/sched/bmq.h
-@@ -0,0 +1,111 @@
+@@ -0,0 +1,110 @@
 +#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
 +
 +/*
@@ -9185,8 +9167,7 @@ index 000000000000..bf7ac80ec242
 +
 +static void sched_task_fork(struct task_struct *p, struct rq *rq)
 +{
-+      p->boost_prio = (p->boost_prio < 0) ?
-+              p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
++      p->boost_prio = MAX_PRIORITY_ADJ;
 +}
 +
 +static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
