Re: [PATCH 4/4] sched: Conditionally build cpu load decay code for nohz

2016-04-02 Thread Peter Zijlstra
On Fri, Apr 01, 2016 at 03:23:07PM +0200, Frederic Weisbecker wrote:
> To complete the tick periodic kernel optimizations.

-ENOCHANGELOG


[PATCH 4/4] sched: Conditionally build cpu load decay code for nohz

2016-04-01 Thread Frederic Weisbecker
To complete the tick periodic kernel optimizations.

Cc: Byungchul Park 
Cc: Chris Metcalf 
Cc: Christoph Lameter 
Cc: Ingo Molnar 
Cc: Luiz Capitulino 
Cc: Mike Galbraith 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Rik van Riel 
Cc: Thomas Gleixner 
Signed-off-by: Frederic Weisbecker 
---
 kernel/sched/fair.c | 41 ++++++++++++++++++++++++++++++-----------
 1 file changed, 30 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1bb053e..0bb872e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4423,6 +4423,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+#ifdef CONFIG_NO_HZ_COMMON
 
 /*
  * per rq 'load' arrray crap; XXX kill this.
@@ -4490,6 +4491,33 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 	return load;
 }
 
+static unsigned long
+cpu_load_update_missed(unsigned long old_load, unsigned long tickless_load,
+		       unsigned long pending_updates, int idx)
+{
+	old_load = decay_load_missed(old_load, pending_updates - 1, idx);
+	if (tickless_load) {
+		old_load -= decay_load_missed(tickless_load, pending_updates - 1, idx);
+		/*
+		 * old_load can never be a negative value because a
+		 * decayed tickless_load cannot be greater than the
+		 * original tickless_load.
+		 */
+		old_load += tickless_load;
+	}
+	return old_load;
+}
+#else /* !CONFIG_NO_HZ_COMMON */
+
+static inline unsigned long
+cpu_load_update_missed(unsigned long old_load, unsigned long tickless_load,
+		       unsigned long pending_updates, int idx)
+{
+	return old_load;
+}
+
+#endif /* CONFIG_NO_HZ_COMMON */
+
 /**
  * __cpu_load_update - update the rq->cpu_load[] statistics
  * @this_rq: The rq to update statistics for
@@ -4541,17 +4569,8 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
 
 		/* scale is effectively 1 << i now, and >> i divides by scale */
 
-		old_load = this_rq->cpu_load[i];
-		old_load = decay_load_missed(old_load, pending_updates - 1, i);
-		if (tickless_load) {
-			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
-			/*
-			 * old_load can never be a negative value because a
-			 * decayed tickless_load cannot be greater than the
-			 * original tickless_load.
-			 */
-			old_load += tickless_load;
-		}
+		old_load = cpu_load_update_missed(this_rq->cpu_load[i],
+						  tickless_load, pending_updates, i);
 		new_load = this_load;
 		/*
 		 * Round up the averaging division if load is increasing. This
-- 
2.7.0
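
For readers outside the thread: rq->cpu_load[i] is an exponentially
decayed load average in which each tick scales the old value by
(2^i - 1) / 2^i. When a nohz CPU misses ticks, decay_load_missed()
catches the average up by applying that per-tick factor once per
missed tick, using precomputed degrade-factor tables rather than a
loop. A minimal standalone sketch of the underlying math (illustration
only, not the kernel's table-driven implementation):

/*
 * Sketch of the decay applied for 'missed' ticks at load index idx.
 * Each iteration scales load by (2^idx - 1) / 2^idx; the real
 * decay_load_missed() in kernel/sched/fair.c replaces this loop
 * with degrade-factor table lookups. Assumes load is small enough
 * that the multiply does not overflow.
 */
static unsigned long decay_load_sketch(unsigned long load,
				       unsigned long missed, int idx)
{
	while (missed--)
		load = (load * ((1UL << idx) - 1)) >> idx;
	return load;
}

Because the per-tick factor never exceeds one, a decayed tickless_load
cannot be greater than the original tickless_load, so the subtraction
in cpu_load_update_missed() cannot underflow; that is the invariant the
in-code comment records. Conversely, a periodic (!CONFIG_NO_HZ_COMMON)
kernel never misses ticks, so pending_updates is always 1 there and a
stub that returns old_load unchanged lets the compiler drop the decay
code altogether, which is the optimization this patch completes.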


