Re: [PATCH 1/4] sched: Gather cpu load functions under a common namespace

2016-04-02 Thread Frederic Weisbecker
On Sat, Apr 02, 2016 at 09:09:08AM +0200, Peter Zijlstra wrote:
> On Fri, Apr 01, 2016 at 03:23:04PM +0200, Frederic Weisbecker wrote:
> > This way they are easily grep'able and recognized.
> 
> Please mention the actual renames done and the new naming scheme.

Right, I'll add more details.

Thanks.


Re: [PATCH 1/4] sched: Gather cpu load functions under a common namespace

2016-04-02 Thread Peter Zijlstra
On Fri, Apr 01, 2016 at 03:23:04PM +0200, Frederic Weisbecker wrote:
> This way they are easily grep'able and recognized.

Please mention the actual renames done and the new naming scheme.



[PATCH 1/4] sched: Gather cpu load functions under a common namespace

2016-04-01 Thread Frederic Weisbecker
This way they are easily grep'able and recognized.

Cc: Byungchul Park 
Cc: Chris Metcalf 
Cc: Christoph Lameter 
Cc: Luiz Capitulino 
Cc: Mike Galbraith 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Rik van Riel 
Cc: Thomas Gleixner 
Signed-off-by: Frederic Weisbecker 
---
 Documentation/trace/ftrace.txt | 10 +-
 include/linux/sched.h  |  4 ++--
 kernel/sched/core.c|  2 +-
 kernel/sched/fair.c| 24 
 kernel/sched/sched.h   |  4 ++--
 kernel/time/tick-sched.c   |  2 +-
 6 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index f52f297..9857606 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1562,12 +1562,12 @@ Doing the same with chrt -r 5 and function-trace set.
  <idle>-0   3dN.1   12us : menu_hrtimer_cancel <-tick_nohz_idle_exit
  <idle>-0   3dN.1   12us : ktime_get <-tick_nohz_idle_exit
  <idle>-0   3dN.1   12us : tick_do_update_jiffies64 <-tick_nohz_idle_exit
-  <idle>-0   3dN.1   13us : update_cpu_load_nohz <-tick_nohz_idle_exit
-  <idle>-0   3dN.1   13us : _raw_spin_lock <-update_cpu_load_nohz
+  <idle>-0   3dN.1   13us : cpu_load_update_nohz <-tick_nohz_idle_exit
+  <idle>-0   3dN.1   13us : _raw_spin_lock <-cpu_load_update_nohz
  <idle>-0   3dN.1   13us : add_preempt_count <-_raw_spin_lock
-  <idle>-0   3dN.2   13us : __update_cpu_load <-update_cpu_load_nohz
-  <idle>-0   3dN.2   14us : sched_avg_update <-__update_cpu_load
-  <idle>-0   3dN.2   14us : _raw_spin_unlock <-update_cpu_load_nohz
+  <idle>-0   3dN.2   13us : __cpu_load_update <-cpu_load_update_nohz
+  <idle>-0   3dN.2   14us : sched_avg_update <-__cpu_load_update
+  <idle>-0   3dN.2   14us : _raw_spin_unlock <-cpu_load_update_nohz
  <idle>-0   3dN.2   14us : sub_preempt_count <-_raw_spin_unlock
  <idle>-0   3dN.1   15us : calc_load_exit_idle <-tick_nohz_idle_exit
  <idle>-0   3dN.1   15us : touch_softlockup_watchdog <-tick_nohz_idle_exit
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 60bba7e..86adc0e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,9 +178,9 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern void calc_global_load(unsigned long ticks);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void update_cpu_load_nohz(int active);
+extern void cpu_load_update_nohz(int active);
 #else
-static inline void update_cpu_load_nohz(int active) { }
+static inline void cpu_load_update_nohz(int active) { }
 #endif
 
 extern void dump_cpu_task(int cpu);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465ee..e507329 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2897,7 +2897,7 @@ void scheduler_tick(void)
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
curr->sched_class->task_tick(rq, curr, 0);
-   update_cpu_load_active(rq);
+   cpu_load_update_active(rq);
calc_global_load_tick(rq);
raw_spin_unlock(&rq->lock);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fe30e66..f33764d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4491,7 +4491,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 }
 
 /**
- * __update_cpu_load - update the rq->cpu_load[] statistics
+ * __cpu_load_update - update the rq->cpu_load[] statistics
  * @this_rq: The rq to update statistics for
  * @this_load: The current load
  * @pending_updates: The number of missed updates
@@ -4526,7 +4526,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
  * term. See the @active parameter.
  */
-static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+static void __cpu_load_update(struct rq *this_rq, unsigned long this_load,
  unsigned long pending_updates, int active)
 {
unsigned long tickless_load = active ? this_rq->cpu_load[0] : 0;
@@ -4574,7 +4574,7 @@ static unsigned long weighted_cpuload(const int cpu)
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
-static void __update_cpu_load_nohz(struct rq *this_rq,
+static void __cpu_load_update_nohz(struct rq *this_rq,
   unsigned long curr_jiffies,
   unsigned long load,
   int active)
@@ -4589,7 +4589,7 @@ static void __update_cpu_load_nohz(struct rq *this_rq,
 * In the NOHZ_FULL case, we were non-idle, we should consider
 * its weighted load.
 */
-   __update_cpu_load(this_rq, load, pending_updates, active);
+   __cpu_load_update(this_rq, load, pending_updates, active);
}
 }
 
@@ -4610,7 +4610,7 @@ static void __update_cpu_load_nohz(struct rq *this_rq,
  * Called from nohz_idle_balance() to update the load ratings before doin