Re: [PATCH] sched/rt: Introduce prio_lower() helper for comparing RT task priority

2018-11-08 Thread Muchun Song
Hi Peter,

This is the v2 patch. I'm sorry that I forgot to add
"v2" to the subject.

Yours,
Muchun Song


[PATCH] sched/rt: Introduce prio_lower() helper for comparing RT task priority

2018-11-08 Thread Muchun Song
We use an integer to represent the priority of an RT task, but a smaller
value corresponds to a higher priority. So if two RT tasks A and B have
priorities prio_a and prio_b, and prio_a is larger than prio_b, then the
priority of task A is actually lower than that of task B, which can be
confusing.

In rt.c there are many if conditions that compare priorities, and because
of this inverted logic the reader has to stop and work out which priority
is actually higher. Introduce a prio_lower() helper for comparing RT task
priorities, which makes the code more readable.

Signed-off-by: Muchun Song 
---
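For reviewers skimming the diff, here is a tiny userspace sanity check of the
helper's semantics (illustrative only, not part of the patch; it simply copies
the prio_lower() definition added below and exercises it, remembering that a
numerically smaller value is a higher RT priority):

#include <assert.h>
#include <stdbool.h>

/* Same definition as the one added to kernel/sched/rt.c below. */
static inline bool prio_lower(int a, int b)
{
	return a > b;
}

int main(void)
{
	assert(prio_lower(5, 3));	/* prio 5 is lower than prio 3 */
	assert(!prio_lower(3, 5));	/* prio 3 is higher than prio 5 */
	assert(!prio_lower(3, 3));	/* equal priorities are not lower */
	return 0;
}
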
 kernel/sched/rt.c | 54 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 9aa3287ce301..4808684607b9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -101,6 +101,15 @@ void init_rt_rq(struct rt_rq *rt_rq)
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 }
 
+/**
+ * prio_lower(a, b) returns true if priority a is
+ * lower than priority b, and false otherwise.
+ */
+static inline bool prio_lower(int a, int b)
+{
+   return a > b;
+}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
@@ -262,7 +271,7 @@ static void pull_rt_task(struct rq *this_rq);
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
/* Try to pull RT tasks here if we lower this rq's prio */
-   return rq->rt.highest_prio.curr > prev->prio;
+   return prio_lower(rq->rt.highest_prio.curr, prev->prio);
 }
 
 static inline int rt_overloaded(struct rq *rq)
@@ -377,7 +386,7 @@ static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 
/* Update the highest prio pushable task */
-   if (p->prio < rq->rt.highest_prio.next)
+   if (prio_lower(rq->rt.highest_prio.next, p->prio))
rq->rt.highest_prio.next = p->prio;
 }
 
@@ -498,7 +507,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
else if (!on_rt_rq(rt_se))
enqueue_rt_entity(rt_se, 0);
 
-   if (rt_rq->highest_prio.curr < curr->prio)
+   if (prio_lower(curr->prio, rt_rq->highest_prio.curr))
resched_curr(rq);
}
 }
@@ -1044,7 +1053,7 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
	if (&rq->rt != rt_rq)
return;
 #endif
-   if (rq->online && prio < prev_prio)
+   if (rq->online && prio_lower(prev_prio, prio))
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
 
@@ -1079,7 +1088,7 @@ inc_rt_prio(struct rt_rq *rt_rq, int prio)
 {
int prev_prio = rt_rq->highest_prio.curr;
 
-   if (prio < prev_prio)
+   if (prio_lower(prev_prio, prio))
rt_rq->highest_prio.curr = prio;
 
inc_rt_prio_smp(rt_rq, prio, prev_prio);
@@ -1092,7 +1101,7 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio)
 
if (rt_rq->rt_nr_running) {
 
-   WARN_ON(prio < prev_prio);
+   WARN_ON(prio_lower(prev_prio, prio));
 
/*
 * This may have been our highest task, and therefore
@@ -1424,7 +1433,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 */
if (curr && unlikely(rt_task(curr)) &&
(curr->nr_cpus_allowed < 2 ||
-curr->prio <= p->prio)) {
+!prio_lower(curr->prio, p->prio))) {
int target = find_lowest_rq(p);
 
/*
@@ -1432,7 +1441,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 * not running a lower priority task.
 */
if (target != -1 &&
-   p->prio < cpu_rq(target)->rt.highest_prio.curr)
+   prio_lower(cpu_rq(target)->rt.highest_prio.curr, p->prio))
cpu = target;
}
rcu_read_unlock();
@@ -1475,7 +1484,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
  */
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
-   if (p->prio < rq->curr->prio) {
+   if (prio_lower(rq->curr->prio, p->prio)) {
resched_curr(rq);
return;
}
@@ -1732,7 +1741,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 
lowest_rq = cpu_rq(cpu);
 
-   if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+   if (!prio_lower(lowest_rq->rt.highest_prio.curr, task->prio)) {
/*
 * Target rq has tasks of equal or higher priority,
 * retrying does not release any lock and is unlikely
@@ -1763,7 +1772,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
