My previous patch:

("sched/cputime: Improve scalability of times()/clock_gettime() on 32 bit cpus")

added a new sum_exec_runtime_seqcount field to the sched_entity struct.

This changed the alignment of other fields in the sched_entity struct.
To preserve the previous alignment, which keeps variables that are
commonly used together in the same cache line, make the nr_migrations
field u32 on 32-bit architectures.

This field is used only for debugging, and nr_migrations should not
grow to an enormous number in practice.

Signed-off-by: Stanislaw Gruszka <[email protected]>
---
 include/linux/sched.h | 5 +++--
 kernel/sched/debug.c  | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 65714bc..b9d9333 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1332,9 +1332,10 @@ struct sched_entity {
        u64                     vruntime;
        u64                     prev_sum_exec_runtime;
 
+#ifdef CONFIG_64BIT
        u64                     nr_migrations;
-
-#ifndef CONFIG_64BIT
+#else
+       u32                     nr_migrations;
        seqcount_t              sum_exec_runtime_seqcount;
 #endif
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2a0a999..db85816 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -921,8 +921,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
-                       avg_per_cpu = div64_u64(avg_per_cpu,
-                                               p->se.nr_migrations);
+                       avg_per_cpu = div64_ul(avg_per_cpu,
+                                              p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }
-- 
1.8.3.1

Reply via email to