commit:     315003951b6f0bc2014da65ad339d8a961f388e3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 28 22:04:16 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Apr 28 22:04:16 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=31500395

BMQ Patch v6.8-r6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   2 +-
 ... => 5020_BMQ-and-PDS-io-scheduler-v6.8-r6.patch | 227 ++++++++++-----------
 2 files changed, 106 insertions(+), 123 deletions(-)

diff --git a/0000_README b/0000_README
index 03fbc840..7f4b52bc 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,6 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.8-r4.patch
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.8-r6.patch
 From:   https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.8-r4.patch b/5020_BMQ-and-PDS-io-scheduler-v6.8-r6.patch
similarity index 98%
rename from 5020_BMQ-and-PDS-io-scheduler-v6.8-r4.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v6.8-r6.patch
index 6ade9048..6fd79852 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v6.8-r4.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.8-r6.patch
@@ -268,10 +268,10 @@ index df3aca89d4f5..1df1f7635188 100644
  static inline bool dl_time_before(u64 a, u64 b)
  {
 diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
-index ab83d85e1183..a9a1dfa99140 100644
+index ab83d85e1183..e66dfb553bc5 100644
 --- a/include/linux/sched/prio.h
 +++ b/include/linux/sched/prio.h
-@@ -18,6 +18,32 @@
+@@ -18,6 +18,28 @@
  #define MAX_PRIO              (MAX_RT_PRIO + NICE_WIDTH)
  #define DEFAULT_PRIO          (MAX_RT_PRIO + NICE_WIDTH / 2)
  
@@ -284,20 +284,16 @@ index ab83d85e1183..a9a1dfa99140 100644
 +/* +/- priority levels from the base priority */
 +#ifdef CONFIG_SCHED_BMQ
 +#define MAX_PRIORITY_ADJ      (12)
-+
-+#define MIN_NORMAL_PRIO               (MAX_RT_PRIO)
-+#define MAX_PRIO              (MIN_NORMAL_PRIO + NICE_WIDTH)
-+#define DEFAULT_PRIO          (MIN_NORMAL_PRIO + NICE_WIDTH / 2)
 +#endif
 +
 +#ifdef CONFIG_SCHED_PDS
 +#define MAX_PRIORITY_ADJ      (0)
++#endif
 +
 +#define MIN_NORMAL_PRIO               (128)
 +#define NORMAL_PRIO_NUM               (64)
 +#define MAX_PRIO              (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
-+#define DEFAULT_PRIO          (MAX_PRIO - NICE_WIDTH / 2)
-+#endif
++#define DEFAULT_PRIO          (MAX_PRIO - MAX_PRIORITY_ADJ - NICE_WIDTH / 2)
 +
 +#endif /* CONFIG_SCHED_ALT */
 +
@@ -422,7 +418,7 @@ index bee58f7468c3..81f568107a6b 100644
        select CGROUP_SCHED
        select FAIR_GROUP_SCHED
 diff --git a/init/init_task.c b/init/init_task.c
-index 7ecb458eb3da..40d2e86da6f9 100644
+index 7ecb458eb3da..e8f8be4f23ba 100644
 --- a/init/init_task.c
 +++ b/init/init_task.c
 @@ -70,9 +70,15 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
@@ -430,9 +426,9 @@ index 7ecb458eb3da..40d2e86da6f9 100644
        .usage          = REFCOUNT_INIT(2),
        .flags          = PF_KTHREAD,
 +#ifdef CONFIG_SCHED_ALT
-+      .prio           = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++      .prio           = DEFAULT_PRIO,
 +      .static_prio    = DEFAULT_PRIO,
-+      .normal_prio    = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++      .normal_prio    = DEFAULT_PRIO,
 +#else
        .prio           = MAX_PRIO - 20,
        .static_prio    = MAX_PRIO - 20,
@@ -663,10 +659,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..1044207ba0ad
+index 000000000000..c566583fe838
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,8945 @@
+@@ -0,0 +1,8934 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -745,7 +741,7 @@ index 000000000000..1044207ba0ad
 +#define sched_feat(x) (0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.8-r4"
++#define ALT_SCHED_VERSION "v6.8-r6"
 +
 +/*
 + * Compile time debug macro
@@ -856,27 +852,21 @@ index 000000000000..1044207ba0ad
 +      idle->on_rq = TASK_ON_RQ_QUEUED;
 +}
 +
-+static inline void
-+clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+      if (low < pr && pr <= high)
++#define CLEAR_CACHED_PREEMPT_MASK(pr, low, high, cpu)         \
++      if (low < pr && pr <= high)                             \
 +              cpumask_clear_cpu(cpu, sched_preempt_mask + pr);
-+}
 +
-+static inline void
-+set_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+      if (low < pr && pr <= high)
++#define SET_CACHED_PREEMPT_MASK(pr, low, high, cpu)           \
++      if (low < pr && pr <= high)                             \
 +              cpumask_set_cpu(cpu, sched_preempt_mask + pr);
-+}
 +
 +static atomic_t sched_prio_record = ATOMIC_INIT(0);
 +
 +/* water mark related functions */
 +static inline void update_sched_preempt_mask(struct rq *rq)
 +{
-+      unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+      unsigned long last_prio = rq->prio;
++      int prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++      int last_prio = rq->prio;
 +      int cpu, pr;
 +
 +      if (prio == last_prio)
@@ -899,7 +889,7 @@ index 000000000000..1044207ba0ad
 +                      cpumask_clear_cpu(cpu, sched_idle_mask);
 +                      last_prio -= 2;
 +              }
-+              clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
++              CLEAR_CACHED_PREEMPT_MASK(pr, prio, last_prio, cpu);
 +
 +              return;
 +      }
@@ -908,13 +898,12 @@ index 000000000000..1044207ba0ad
 +#ifdef CONFIG_SCHED_SMT
 +              if (static_branch_likely(&sched_smt_present) &&
 +                  cpumask_intersects(cpu_smt_mask(cpu), sched_idle_mask))
-+                      cpumask_or(sched_sg_idle_mask,
-+                                 sched_sg_idle_mask, cpu_smt_mask(cpu));
++                      cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
 +#endif
 +              cpumask_set_cpu(cpu, sched_idle_mask);
 +              prio -= 2;
 +      }
-+      set_recorded_preempt_mask(pr, last_prio, prio, cpu);
++      SET_CACHED_PREEMPT_MASK(pr, last_prio, prio, cpu);
 +}
 +
 +/*
@@ -927,8 +916,7 @@ index 000000000000..1044207ba0ad
 +      return list_first_entry(head, struct task_struct, sq_node);
 +}
 +
-+static inline struct task_struct *
-+sched_rq_next_task(struct task_struct *p, struct rq *rq)
++static inline struct task_struct * sched_rq_next_task(struct task_struct *p, struct rq *rq)
 +{
 +      struct list_head *next = p->sq_node.next;
 +
@@ -936,7 +924,8 @@ index 000000000000..1044207ba0ad
 +              struct list_head *head;
 +              unsigned long idx = next - &rq->queue.heads[0];
 +
-+              idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS, sched_idx2prio(idx, rq) + 1);
++              idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++                                  sched_idx2prio(idx, rq) + 1);
 +              head = &rq->queue.heads[sched_prio2idx(idx, rq)];
 +
 +              return list_first_entry(head, struct task_struct, sq_node);
@@ -1040,16 +1029,14 @@ index 000000000000..1044207ba0ad
 +/*
 + * Context: p->pi_lock
 + */
-+static inline struct rq
-+*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++static inline struct rq *__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
 +{
 +      struct rq *rq;
 +      for (;;) {
 +              rq = task_rq(p);
 +              if (p->on_cpu || task_on_rq_queued(p)) {
 +                      raw_spin_lock(&rq->lock);
-+                      if (likely((p->on_cpu || task_on_rq_queued(p))
-+                                 && rq == task_rq(p))) {
++                      if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
 +                              *plock = &rq->lock;
 +                              return rq;
 +                      }
@@ -1065,24 +1052,21 @@ index 000000000000..1044207ba0ad
 +      }
 +}
 +
-+static inline void
-+__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++static inline void __task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
 +{
 +      if (NULL != lock)
 +              raw_spin_unlock(lock);
 +}
 +
-+static inline struct rq
-+*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
-+                        unsigned long *flags)
++static inline struct rq *
++task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, unsigned long *flags)
 +{
 +      struct rq *rq;
 +      for (;;) {
 +              rq = task_rq(p);
 +              if (p->on_cpu || task_on_rq_queued(p)) {
 +                      raw_spin_lock_irqsave(&rq->lock, *flags);
-+                      if (likely((p->on_cpu || task_on_rq_queued(p))
-+                                 && rq == task_rq(p))) {
++                      if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
 +                              *plock = &rq->lock;
 +                              return rq;
 +                      }
@@ -1093,8 +1077,7 @@ index 000000000000..1044207ba0ad
 +                      } while (unlikely(task_on_rq_migrating(p)));
 +              } else {
 +                      raw_spin_lock_irqsave(&p->pi_lock, *flags);
-+                      if (likely(!p->on_cpu && !p->on_rq &&
-+                                 rq == task_rq(p))) {
++                      if (likely(!p->on_cpu && !p->on_rq && rq == task_rq(p))) {
 +                              *plock = &p->pi_lock;
 +                              return rq;
 +                      }
@@ -1104,8 +1087,7 @@ index 000000000000..1044207ba0ad
 +}
 +
 +static inline void
-+task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
-+                            unsigned long *flags)
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, unsigned long *flags)
 +{
 +      raw_spin_unlock_irqrestore(lock, *flags);
 +}
@@ -1173,15 +1155,13 @@ index 000000000000..1044207ba0ad
 +      }
 +}
 +
-+static inline void
-+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
 +      __acquires(rq->lock)
 +{
 +      raw_spin_lock_irqsave(&rq->lock, rf->flags);
 +}
 +
-+static inline void
-+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
 +      __releases(rq->lock)
 +{
 +      raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
@@ -1301,8 +1281,7 @@ index 000000000000..1044207ba0ad
 +static inline void rq_load_update(struct rq *rq)
 +{
 +      u64 time = rq->clock;
-+      u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
-+                      RQ_LOAD_HISTORY_BITS - 1);
++      u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp), RQ_LOAD_HISTORY_BITS - 1);
 +      u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
 +      u64 curr = !!rq->nr_running;
 +
@@ -1366,8 +1345,7 @@ index 000000000000..1044207ba0ad
 +#ifdef CONFIG_SMP
 +      rq_load_update(rq);
 +#endif
-+      data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-+                                                cpu_of(rq)));
++      data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq)));
 +      if (data)
 +              data->func(data, rq_clock(rq), flags);
 +}
@@ -2117,8 +2095,7 @@ index 000000000000..1044207ba0ad
 +
 +static inline int __normal_prio(int policy, int rt_prio, int static_prio)
 +{
-+      return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
-+              static_prio + MAX_PRIORITY_ADJ;
++      return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) : static_prio;
 +}
 +
 +/*
@@ -2393,8 +2370,7 @@ index 000000000000..1044207ba0ad
 + *
 + * Returns (locked) new rq. Old rq's lock is released.
 + */
-+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
-+                                 new_cpu)
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
 +{
 +      int src_cpu;
 +
@@ -2435,8 +2411,7 @@ index 000000000000..1044207ba0ad
 + * So we race with normal scheduler movements, but that's OK, as long
 + * as the task is no longer on this CPU.
 + */
-+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
-+                               dest_cpu)
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
 +{
 +      /* Affinity changed (again). */
 +      if (!is_cpu_allowed(p, dest_cpu))
@@ -4862,41 +4837,40 @@ index 000000000000..1044207ba0ad
 +              wq_worker_tick(curr);
 +}
 +
-+#ifdef CONFIG_SCHED_SMT
-+static inline int sg_balance_cpu_stop(void *data)
++#ifdef CONFIG_SMP
++
++static int active_balance_cpu_stop(void *data)
 +{
++      struct balance_arg *arg = data;
++      struct task_struct *p = arg->task;
 +      struct rq *rq = this_rq();
-+      struct task_struct *p = data;
-+      cpumask_t tmp;
 +      unsigned long flags;
++      cpumask_t tmp;
 +
 +      local_irq_save(flags);
 +
 +      raw_spin_lock(&p->pi_lock);
 +      raw_spin_lock(&rq->lock);
 +
-+      rq->active_balance = 0;
-+      /* _something_ may have changed the task, double check again */
++      arg->active = 0;
++
 +      if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+          cpumask_and(&tmp, p->cpus_ptr, sched_sg_idle_mask) &&
++          cpumask_and(&tmp, p->cpus_ptr, arg->cpumask) &&
 +          !is_migration_disabled(p)) {
-+              int cpu = cpu_of(rq);
-+              int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
++              int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu_of(rq)));
 +              rq = move_queued_task(rq, p, dcpu);
 +      }
 +
 +      raw_spin_unlock(&rq->lock);
-+      raw_spin_unlock(&p->pi_lock);
-+
-+      local_irq_restore(flags);
++      raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 +
 +      return 0;
 +}
 +
-+/* sg_balance_trigger - trigger slibing group balance for @cpu */
-+static inline int sg_balance_trigger(struct rq *src_rq, const int cpu)
++/* trigger_active_balance - for @rq */
++static inline int
++trigger_active_balance(struct rq *src_rq, struct rq *rq, struct balance_arg *arg)
 +{
-+      struct rq *rq= cpu_rq(cpu);
 +      unsigned long flags;
 +      struct task_struct *p;
 +      int res;
@@ -4906,10 +4880,13 @@ index 000000000000..1044207ba0ad
 +
 +      res = (1 == rq->nr_running) &&                                  \
 +            !is_migration_disabled((p = sched_rq_first_task(rq))) &&  \
-+            cpumask_intersects(p->cpus_ptr, sched_sg_idle_mask) &&    \
-+            !rq->active_balance;
-+      if (res)
-+              rq->active_balance = 1;
++            cpumask_intersects(p->cpus_ptr, arg->cpumask) &&          \
++            !arg->active;
++      if (res) {
++              arg->task = p;
++
++              arg->active = 1;
++      }
 +
 +      raw_spin_unlock_irqrestore(&rq->lock, flags);
 +
@@ -4917,8 +4894,8 @@ index 000000000000..1044207ba0ad
 +              preempt_disable();
 +              raw_spin_unlock(&src_rq->lock);
 +
-+              stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, p,
-+                                  &rq->active_balance_work);
++              stop_one_cpu_nowait(cpu_of(rq), active_balance_cpu_stop,
++                                  arg, &rq->active_balance_work);
 +
 +              preempt_enable();
 +              raw_spin_lock(&src_rq->lock);
@@ -4927,6 +4904,7 @@ index 000000000000..1044207ba0ad
 +      return res;
 +}
 +
++#ifdef CONFIG_SCHED_SMT
 +/*
 + * sg_balance - slibing group balance check for run queue @rq
 + */
@@ -4939,9 +4917,11 @@ index 000000000000..1044207ba0ad
 +              int i, cpu = cpu_of(rq);
 +
 +              for_each_cpu_wrap(i, &chk, cpu) {
-+                      if (cpumask_subset(cpu_smt_mask(i), &chk) &&\
-+                          sg_balance_trigger(rq, i))
-+                              return;
++                      if (cpumask_subset(cpu_smt_mask(i), &chk)) {
++                              struct rq *target_rq = cpu_rq(i);
++                              if (trigger_active_balance(rq, target_rq, &target_rq->sg_balance_arg))
++                                      return;
++                      }
 +              }
 +      }
 +}
@@ -4951,6 +4931,8 @@ index 000000000000..1044207ba0ad
 +};
 +#endif /* CONFIG_SCHED_SMT */
 +
++#endif /* CONFIG_SMP */
++
 +#ifdef CONFIG_NO_HZ_FULL
 +
 +struct tick_work {
@@ -5378,8 +5360,9 @@ index 000000000000..1044207ba0ad
 +#endif
 +
 +#ifdef CONFIG_SCHED_SMT
-+                      if (likely(rq->online) &&
-+                          cpumask_test_cpu(cpu, sched_sg_idle_mask))
++                      if (static_key_count(&sched_smt_present.key) > 1 &&
++                          cpumask_test_cpu(cpu, sched_sg_idle_mask) &&
++                          rq->online)
 +                              __queue_balance_callback(rq, 
&per_cpu(sg_balance_head, cpu));
 +#endif
 +                      schedstat_inc(rq->sched_goidle);
@@ -5544,9 +5527,6 @@ index 000000000000..1044207ba0ad
 +#endif
 +
 +      if (likely(prev != next)) {
-+#ifdef CONFIG_SCHED_BMQ
-+              rq->last_ts_switch = rq->clock;
-+#endif
 +              next->last_ran = rq->clock_task;
 +
 +              /*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
@@ -8288,6 +8268,8 @@ index 000000000000..1044207ba0ad
 +#ifdef CONFIG_SCHED_SMT
 +              TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
 +#endif
++              TOPOLOGY_CPUMASK(cluster, topology_cluster_cpumask(cpu), false);
++
 +              per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
 +              per_cpu(sched_cpu_llc_mask, cpu) = topo;
 +              TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
@@ -8354,6 +8336,9 @@ index 000000000000..1044207ba0ad
 +{
 +      int i;
 +      struct rq *rq;
++#ifdef CONFIG_SCHED_SMT
++      struct balance_arg balance_arg = {.cpumask = sched_sg_idle_mask, .active = 0};
++#endif
 +
 +      printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
 +                       " by Alfred Chen.\n");
@@ -8390,7 +8375,7 @@ index 000000000000..1044207ba0ad
 +              rq->cpu = i;
 +
 +#ifdef CONFIG_SCHED_SMT
-+              rq->active_balance = 0;
++              rq->sg_balance_arg = balance_arg;
 +#endif
 +
 +#ifdef CONFIG_NO_HZ_COMMON
@@ -9652,10 +9637,10 @@ index 000000000000..1dbd7eb6a434
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..ba1b7b805f1a
+index 000000000000..5da166931ee9
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,975 @@
+@@ -0,0 +1,976 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
@@ -9814,6 +9799,12 @@ index 000000000000..ba1b7b805f1a
 +      void (*func)(struct rq *rq);
 +};
 +
++struct balance_arg {
++      struct task_struct      *task;
++      int                     active;
++      cpumask_t               *cpumask;
++};
++
 +/*
 + * This is the main, per-CPU runqueue data structure.
 + * This data should only be modified by the local cpu.
@@ -9828,11 +9819,12 @@ index 000000000000..ba1b7b805f1a
 +      struct mm_struct                *prev_mm;
 +
 +      struct sched_queue              queue           ____cacheline_aligned;
++
++      int                             prio;
 +#ifdef CONFIG_SCHED_PDS
-+      u64                     time_edge;
-+      unsigned long                   prio_idx;
++      int                             prio_idx;
++      u64                             time_edge;
 +#endif
-+      unsigned long                   prio;
 +
 +      /* switch count */
 +      u64 nr_switches;
@@ -9861,9 +9853,10 @@ index 000000000000..ba1b7b805f1a
 +#endif
 +
 +#ifdef CONFIG_SCHED_SMT
-+      int active_balance;
-+      struct cpu_stop_work    active_balance_work;
++      struct balance_arg      sg_balance_arg          ____cacheline_aligned;
 +#endif
++      struct cpu_stop_work    active_balance_work;
++
 +      struct balance_callback *balance_callback;
 +#ifdef CONFIG_HOTPLUG_CPU
 +      struct rcuwait          hotplug_wait;
@@ -9893,9 +9886,6 @@ index 000000000000..ba1b7b805f1a
 +      /* Ensure that all clocks are in the same cache line */
 +      u64                     clock ____cacheline_aligned;
 +      u64                     clock_task;
-+#ifdef CONFIG_SCHED_BMQ
-+      u64                     last_ts_switch;
-+#endif
 +
 +      unsigned int  nr_running;
 +      unsigned long nr_uninterruptible;
@@ -10004,10 +9994,6 @@ index 000000000000..ba1b7b805f1a
 +      return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
 +}
 +
-+extern void flush_smp_call_function_queue(void);
-+
-+#else  /* !CONFIG_SMP */
-+static inline void flush_smp_call_function_queue(void) { }
 +#endif
 +
 +#ifndef arch_scale_freq_tick
@@ -10633,19 +10619,16 @@ index 000000000000..ba1b7b805f1a
 +#endif /* ALT_SCHED_H */
 diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
 new file mode 100644
-index 000000000000..b7c9813f6fa7
+index 000000000000..bdc34ad3cc8c
 --- /dev/null
 +++ b/kernel/sched/bmq.h
-@@ -0,0 +1,101 @@
+@@ -0,0 +1,98 @@
 +#define ALT_SCHED_NAME "BMQ"
 +
 +/*
 + * BMQ only routines
 + */
-+#define rq_switch_time(rq)    ((rq)->clock - (rq)->last_ts_switch)
-+#define boost_threshold(p)    (sysctl_sched_base_slice >> ((20 - (p)->boost_prio) / 2))
-+
-+static inline void boost_task(struct task_struct *p)
++static inline void boost_task(struct task_struct *p, int n)
 +{
 +      int limit;
 +
@@ -10660,8 +10643,7 @@ index 000000000000..b7c9813f6fa7
 +              return;
 +      }
 +
-+      if (p->boost_prio > limit)
-+              p->boost_prio--;
++      p->boost_prio = max(limit, p->boost_prio - n);
 +}
 +
 +static inline void deboost_task(struct task_struct *p)
@@ -10679,13 +10661,13 @@ index 000000000000..b7c9813f6fa7
 +static inline int
 +task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
 +{
-+      return p->prio + p->boost_prio - MAX_RT_PRIO;
++      return p->prio + p->boost_prio - MIN_NORMAL_PRIO;
 +}
 +
 +static inline int task_sched_prio(const struct task_struct *p)
 +{
-+      return (p->prio < MAX_RT_PRIO)? (p->prio >> 2) :
-+              MIN_SCHED_NORMAL_PRIO + (p->prio + p->boost_prio - MAX_RT_PRIO) / 2;
++      return (p->prio < MIN_NORMAL_PRIO)? (p->prio >> 2) :
++              MIN_SCHED_NORMAL_PRIO + (p->prio + p->boost_prio - MIN_NORMAL_PRIO) / 2;
 +}
 +
 +#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio) \
@@ -10709,7 +10691,7 @@ index 000000000000..b7c9813f6fa7
 +
 +inline int task_running_nice(struct task_struct *p)
 +{
-+      return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++      return (p->prio + p->boost_prio > DEFAULT_PRIO);
 +}
 +
 +static inline void sched_update_rq_clock(struct rq *rq) {}
@@ -10729,14 +10711,15 @@ index 000000000000..b7c9813f6fa7
 +
 +static inline void sched_task_ttwu(struct task_struct *p)
 +{
-+      if(this_rq()->clock_task - p->last_ran > sysctl_sched_base_slice)
-+              boost_task(p);
++      s64 delta = this_rq()->clock_task > p->last_ran;
++
++      if (likely(delta > 0))
++              boost_task(p, delta  >> 22);
 +}
 +
 +static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
 +{
-+      if (rq_switch_time(rq) < boost_threshold(p))
-+              boost_task(p);
++      boost_task(p, 1);
 +}
 diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
 index d9dc9ab3773f..71a25540d65e 100644
