Commit-ID:  317d359df95dd0cb7653d09b7fc513770590cf85
Gitweb:     https://git.kernel.org/tip/317d359df95dd0cb7653d09b7fc513770590cf85
Author:     Peter Zijlstra <pet...@infradead.org>
AuthorDate: Thu, 5 Apr 2018 10:05:21 +0200
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Thu, 5 Apr 2018 10:56:16 +0200

sched/core: Force proper alignment of 'struct util_est'

For some as yet not understood reason, Tony gets unaligned access
traps on IA64 because of:

  struct util_est ue = READ_ONCE(p->se.avg.util_est);

and:

  WRITE_ONCE(p->se.avg.util_est, ue);

introduced by commit:

  d519329f72a6 ("sched/fair: Update util_est only on util_avg updates")

Normally those two fields should end up on an 8-byte aligned location,
but UP and RANDSTRUCT can mess that up, so enforce the alignment
explicitly.
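
As a rough illustration of what the attribute buys us (a stand-alone
userspace sketch, not the kernel code itself; the *_like names below are
made up for the example), forcing 8-byte alignment on the small two-u32
struct guarantees it can never start on a merely 4-byte-aligned boundary,
which a couple of static asserts can check at build time:

  #include <stdint.h>
  #include <stddef.h>

  /* Userspace approximation of the patched struct util_est: two 32-bit
   * fields that together occupy one naturally aligned 64-bit slot. */
  struct util_est_like {
          uint32_t enqueued;
          uint32_t ewma;
  } __attribute__((__aligned__(sizeof(uint64_t))));

  struct sched_avg_like {
          uint64_t last_update_time;
          /* ... other members elided ... */
          struct util_est_like util_est;
  };

  /* With the alignment attribute in place, an 8-byte load/store of
   * util_est is always a naturally aligned access, so IA64 cannot
   * trap on it. */
  _Static_assert(_Alignof(struct util_est_like) == sizeof(uint64_t),
                 "util_est must be 8-byte aligned");
  _Static_assert(offsetof(struct sched_avg_like, util_est) % sizeof(uint64_t) == 0,
                 "util_est must sit on an 8-byte boundary");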

Also make the alignment of sched_avg unconditional, as it is really
about data locality, not false-sharing.
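
The UP part is visible from the cacheline helpers themselves:
____cacheline_aligned_in_smp expands to nothing on !CONFIG_SMP builds, so
the old conditional annotation gave no alignment guarantee at all there.
Roughly paraphrased (not an exact quote of include/linux/cache.h, with 64
standing in for SMP_CACHE_BYTES):

  #define ____cacheline_aligned        __attribute__((__aligned__(64)))

  #ifdef CONFIG_SMP
  #define ____cacheline_aligned_in_smp ____cacheline_aligned
  #else
  #define ____cacheline_aligned_in_smp /* empty: no alignment on UP */
  #endif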

With or without this patch, the layout of sched_avg on an
ia64-defconfig build looks like:

        $ pahole -EC sched_avg ia64-defconfig/kernel/sched/core.o
        die__process_function: tag not supported (INVALID)!
        struct sched_avg {
                /* typedef u64 */ long long unsigned int     last_update_time;   /*     0     8 */
                /* typedef u64 */ long long unsigned int     load_sum;           /*     8     8 */
                /* typedef u64 */ long long unsigned int     runnable_load_sum;  /*    16     8 */
                /* typedef u32 */ unsigned int               util_sum;           /*    24     4 */
                /* typedef u32 */ unsigned int               period_contrib;     /*    28     4 */
                long unsigned int          load_avg;                             /*    32     8 */
                long unsigned int          runnable_load_avg;                    /*    40     8 */
                long unsigned int          util_avg;                             /*    48     8 */
                struct util_est {
                        unsigned int       enqueued;                             /*    56     4 */
                        unsigned int       ewma;                                 /*    60     4 */
                } util_est;                                                      /*    56     8 */
                /* --- cacheline 1 boundary (64 bytes) --- */

                /* size: 64, cachelines: 1, members: 9 */
        };

Reported-and-Tested-by: Tony Luck <tony.l...@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Arnaldo Carvalho de Melo <a...@redhat.com>
Cc: Frederic Weisbecker <frede...@kernel.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Mel Gorman <mgor...@techsingularity.net>
Cc: Norbert Manthey <nmant...@amazon.de>
Cc: Patrick Bellasi <patrick.bell...@arm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Tony <tony.l...@intel.com>
Cc: Vincent Guittot <vincent.guit...@linaro.org>
Fixes: d519329f72a6 ("sched/fair: Update util_est only on util_avg updates")
Link: http://lkml.kernel.org/r/20180405080521.gg4...@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 include/linux/sched.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f228c6033832..b3d697f3b573 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -300,7 +300,7 @@ struct util_est {
        unsigned int                    enqueued;
        unsigned int                    ewma;
 #define UTIL_EST_WEIGHT_SHIFT          2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
        unsigned long                   runnable_load_avg;
        unsigned long                   util_avg;
        struct util_est                 util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
         * Put into separate cache line so it does not
         * collide with read-mostly values above.
         */
-       struct sched_avg                avg ____cacheline_aligned_in_smp;
+       struct sched_avg                avg;
 #endif
 };
 
