[PATCH 2/5] sched/fair: Replace cfs_rq->rb_leftmost

2017-05-29 Thread Davidlohr Bueso
... with the generic cached rbtree (rb_root_cached) flavor instead. No
changes in semantics whatsoever.

Signed-off-by: Davidlohr Bueso 
---
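Note for reviewers: the rb_root_cached interface this patch builds on
(introduced earlier in this series) is, roughly sketched:

	struct rb_root_cached {
		struct rb_root rb_root;
		struct rb_node *rb_leftmost;
	};

	#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
	#define rb_first_cached(root) ((root)->rb_leftmost)

	/* 'leftmost' tells the insert whether the new node became the
	 * leftmost in the tree, so the cache can be updated in place. */
	void rb_insert_color_cached(struct rb_node *node,
				    struct rb_root_cached *root,
				    bool leftmost);
	void rb_erase_cached(struct rb_node *node,
			     struct rb_root_cached *root);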
 kernel/sched/debug.c |  2 +-
 kernel/sched/fair.c  | 35 +++
 kernel/sched/sched.h |  3 +--
 3 files changed, 13 insertions(+), 27 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 38f019324f1a..85df87666f6e 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -488,7 +488,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SPLIT_NS(cfs_rq->exec_clock));
 
raw_spin_lock_irqsave(&rq->lock, flags);
-   if (cfs_rq->rb_leftmost)
+   if (cfs_rq->tasks_timeline.rb_leftmost)
MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
if (last)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 47a0c552c77b..63b0c5f4770a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -523,8 +523,8 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
curr = NULL;
}
 
-   if (cfs_rq->rb_leftmost) {
-   struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
+   if (cfs_rq->tasks_timeline.rb_leftmost) {
+   struct sched_entity *se = rb_entry(cfs_rq->tasks_timeline.rb_leftmost,
   struct sched_entity,
   run_node);
 
@@ -547,10 +547,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
  */
 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-   struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+   struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
struct rb_node *parent = NULL;
struct sched_entity *entry;
-   int leftmost = 1;
+   bool leftmost = true;
 
/*
 * Find the right place in the rbtree:
@@ -566,36 +566,23 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
link = &parent->rb_left;
} else {
link = &parent->rb_right;
-   leftmost = 0;
+   leftmost = false;
}
}
 
-   /*
-* Maintain a cache of leftmost tree entries (it is frequently
-* used):
-*/
-   if (leftmost)
-   cfs_rq->rb_leftmost = &se->run_node;
-
rb_link_node(&se->run_node, parent, link);
-   rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+   rb_insert_color_cached(&se->run_node,
+  &cfs_rq->tasks_timeline, leftmost);
 }
 
 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-   if (cfs_rq->rb_leftmost == &se->run_node) {
-   struct rb_node *next_node;
-
-   next_node = rb_next(&se->run_node);
-   cfs_rq->rb_leftmost = next_node;
-   }
-
-   rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+   rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 {
-   struct rb_node *left = cfs_rq->rb_leftmost;
+   struct rb_node *left = cfs_rq->tasks_timeline.rb_leftmost;
 
if (!left)
return NULL;
@@ -616,7 +603,7 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
 #ifdef CONFIG_SCHED_DEBUG
 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
-   struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
+   struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
 
if (!last)
return NULL;
@@ -9235,7 +9222,7 @@ static void set_curr_task_fair(struct rq *rq)
 
 void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
-   cfs_rq->tasks_timeline = RB_ROOT;
+   cfs_rq->tasks_timeline = RB_ROOT_CACHED;
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 #ifndef CONFIG_64BIT
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f8cf1d87f065..6fe986abfeee 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -404,8 +404,7 @@ struct cfs_rq {
u64 min_vruntime_copy;
 #endif
 
-   struct rb_root tasks_timeline;
-   struct rb_node *rb_leftmost;
+   struct rb_root_cached tasks_timeline;
 
/*
 * 'curr' points to currently running entity on this cfs_rq.
-- 
2.12.0
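(For completeness: rb_erase_cached() is expected to do internally what the
open-coded block removed from __dequeue_entity() did. A sketch of it, per
the cached-rbtree patch earlier in this series:

	static inline void rb_erase_cached(struct rb_node *node,
					   struct rb_root_cached *root)
	{
		/* Advance the cached leftmost before erasing, exactly as
		 * the removed __dequeue_entity() code did by hand. */
		if (root->rb_leftmost == node)
			root->rb_leftmost = rb_next(node);
		rb_erase(node, &root->rb_root);
	}

hence "no changes in semantics".)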


