FYI, we noticed the following changes on

git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/entry
commit fa58aafc44805ac425d17c6a8082513b5442ce9d ("x86/entry/64: When returning via SYSRET, POP regs instead of using MOV")


=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
  lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/new_raph

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
    184099 ±  0%     +10.8%     204000 ±  0%  aim7.jobs-per-min
    131.89 ±  0%      -9.8%     119.00 ±  0%  aim7.time.elapsed_time
    131.89 ±  0%      -9.8%     119.00 ±  0%  aim7.time.elapsed_time.max
   2215262 ±  0%     -92.5%     165275 ±  0%  aim7.time.involuntary_context_switches
     19.56 ±  1%     -65.8%       6.70 ±  5%  aim7.time.system_time
    435.63 ±  0%      -2.8%     423.34 ±  0%  aim7.time.user_time
     60385 ±  1%     -17.3%      49927 ±  0%  aim7.time.voluntary_context_switches
    131.89 ±  0%      -9.8%     119.00 ±  0%  time.elapsed_time
    131.89 ±  0%      -9.8%     119.00 ±  0%  time.elapsed_time.max
   2215262 ±  0%     -92.5%     165275 ±  0%  time.involuntary_context_switches
     19.56 ±  1%     -65.8%       6.70 ±  5%  time.system_time
     60385 ±  1%     -17.3%      49927 ±  0%  time.voluntary_context_switches
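
A note on reading these tables: the left column is the first commit listed
(a4be9881623375fd, the base), the right column is the tested commit
(fa58aafc44805ac4), and %change is the relative difference of the two means.
A minimal sketch of that arithmetic in shell, using the aim7.jobs-per-min row
above; the awk one-liner is illustrative, not part of the LKP tooling:

        # %change = (new - old) / old * 100
        awk 'BEGIN { old = 184099; new = 204000;
                     printf "%+.1f%%\n", (new - old) / old * 100 }'
        # prints: +10.8%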

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
  lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/pipe_cpy

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
    247245 ±  1%     +15.6%     285751 ±  1%  aim7.jobs-per-min
     98.61 ±  1%     -13.4%      85.37 ±  1%  aim7.time.elapsed_time
     98.61 ±  1%     -13.4%      85.37 ±  1%  aim7.time.elapsed_time.max
   2003598 ±  0%     -93.3%     133967 ±  2%  aim7.time.involuntary_context_switches
    266.80 ±  1%      -7.1%     247.73 ±  1%  aim7.time.system_time
     51.41 ±  4%     -11.8%      45.32 ±  7%  aim7.time.user_time
     53934 ±  1%     -21.5%      42329 ±  1%  aim7.time.voluntary_context_switches
     98.61 ±  1%     -13.4%      85.37 ±  1%  time.elapsed_time
     98.61 ±  1%     -13.4%      85.37 ±  1%  time.elapsed_time.max
   2003598 ±  0%     -93.3%     133967 ±  2%  time.involuntary_context_switches
     51.41 ±  4%     -11.8%      45.32 ±  7%  time.user_time
     53934 ±  1%     -21.5%      42329 ±  1%  time.voluntary_context_switches

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads:
  lkp-a06/dbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
   2245160 ±  8%     -76.6%     526406 ±  4%  dbench.time.involuntary_context_switches
    379.50 ±  0%      +1.3%     384.50 ±  0%  dbench.time.percent_of_cpu_this_job_got
      1715 ±  0%      +1.7%       1745 ±  0%  dbench.time.system_time
   2245160 ±  8%     -76.6%     526406 ±  4%  time.involuntary_context_switches
      2.69 ± 11%     +81.5%       4.88 ± 37%  perf-profile.cpu-cycles.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
      1.43 ±  2%     -10.1%       1.29 ±  2%  perf-profile.cpu-cycles.entry_SYSCALL_64_after_swapgs
      1.51 ±  8%     -26.2%       1.11 ± 10%  perf-profile.cpu-cycles.rcu_nocb_kthread.kthread.ret_from_fork
      1.20 ± 15%    +109.4%       2.51 ± 46%  perf-profile.cpu-cycles.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
      0.68 ± 16%    +110.7%       1.43 ± 47%  perf-profile.cpu-cycles.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer
      1.97 ± 11%     +96.4%       3.87 ± 40%  perf-profile.cpu-cycles.tick_sched_handle.isra.17.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt
      2.33 ± 10%     +84.8%       4.30 ± 38%  perf-profile.cpu-cycles.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
      1.90 ± 11%     +96.3%       3.72 ± 41%  perf-profile.cpu-cycles.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt
      1.07 ±  2%      -9.3%       0.97 ±  3%  perf-profile.cpu-cycles.vfs_create.path_openat.do_filp_open.do_sys_open.sys_open

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
  nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/ext4/8K/400M/fsyncBeforeClose/16d/256fpd

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
     10935 ±  7%     -38.1%       6768 ±  1%  fsmark.time.involuntary_context_switches
      2989 ±  0%      +1.2%       3026 ±  0%  fsmark.time.maximum_resident_set_size
     10935 ±  7%     -38.1%       6768 ±  1%  time.involuntary_context_switches
     29861 ±  3%     -86.7%       3970 ±  1%  vmstat.system.cs
     13362 ±  3%     -97.0%     405.25 ±  1%  vmstat.system.in
  76414335 ±  1%     -55.4%   34106888 ±  4%  cpuidle.C1-NHM.time
   4836217 ±  0%     -92.9%     344308 ±  4%  cpuidle.C1-NHM.usage
      1310 ±  4%     -96.7%      43.00 ± 10%  cpuidle.POLL.usage
      1.32 ±  2%     -43.9%       0.74 ±  0%  turbostat.%Busy
     39.25 ±  2%     -51.6%      19.00 ±  0%  turbostat.Avg_MHz
      2985 ±  0%     -15.9%       2512 ±  0%  turbostat.Bzy_MHz
      7.68 ±  5%     -42.2%       4.44 ±  3%  turbostat.CPU%c1
      0.00 ± -1%      +Inf%      20233 ±125%  latency_stats.avg.submit_bio_wait.blkdev_issue_flush.jbd2_cleanup_journal_tail.jbd2_log_do_checkpoint.__jbd2_log_wait_for_space.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.__ext4_new_inode.ext4_mkdir.vfs_mkdir.SyS_mkdir
      4866 ± 28%     +42.4%       6930 ±141%  latency_stats.max.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_update_time.file_update_time.__generic_file_write_iter.ext4_file_write_iter.__vfs_write
      8314 ± 73%    +365.2%      38680 ± 69%  latency_stats.max.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_write_end.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter
      0.00 ± -1%      +Inf%      24939 ±105%  latency_stats.max.submit_bio_wait.blkdev_issue_flush.jbd2_cleanup_journal_tail.jbd2_log_do_checkpoint.__jbd2_log_wait_for_space.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.__ext4_new_inode.ext4_mkdir.vfs_mkdir.SyS_mkdir
      0.00 ± -1%      +Inf%      24960 ±105%  latency_stats.sum.submit_bio_wait.blkdev_issue_flush.jbd2_cleanup_journal_tail.jbd2_log_do_checkpoint.__jbd2_log_wait_for_space.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.__ext4_new_inode.ext4_mkdir.vfs_mkdir.SyS_mkdir
      5099 ±  5%      +8.2%       5517 ±  5%  sched_debug.cfs_rq[0]:/.min_vruntime
    533.25 ±  3%      -9.6%     482.25 ±  2%  sched_debug.cfs_rq[0]:/.tg->runnable_avg
    537.75 ±  3%      -9.9%     484.75 ±  2%  sched_debug.cfs_rq[1]:/.tg->runnable_avg
     11.50 ± 35%     +65.2%      19.00 ± 11%  sched_debug.cfs_rq[2]:/.nr_spread_over
    538.50 ±  3%      -9.7%     486.50 ±  2%  sched_debug.cfs_rq[2]:/.tg->runnable_avg
     -1924 ±-24%     +34.3%      -2583 ±-12%  sched_debug.cfs_rq[3]:/.spread0
    539.75 ±  3%     -10.4%     483.75 ±  2%  sched_debug.cfs_rq[3]:/.tg->runnable_avg
      1006 ± 13%     +17.2%       1179 ±  5%  sched_debug.cfs_rq[4]:/.exec_clock
      2780 ± 16%     +20.9%       3361 ±  7%  sched_debug.cfs_rq[4]:/.min_vruntime
    542.75 ±  3%     -10.7%     484.50 ±  2%  sched_debug.cfs_rq[4]:/.tg->runnable_avg
      2626 ±  5%     +41.7%       3723 ± 12%  sched_debug.cfs_rq[5]:/.avg->runnable_avg_sum
      2463 ±  8%     +16.3%       2865 ±  7%  sched_debug.cfs_rq[5]:/.min_vruntime
    547.00 ±  4%     -11.4%     484.50 ±  2%  sched_debug.cfs_rq[5]:/.tg->runnable_avg
     56.75 ±  4%     +41.9%      80.50 ± 13%  sched_debug.cfs_rq[5]:/.tg_runnable_contrib
    909.00 ± 74%    +241.7%       3105 ±  4%  sched_debug.cfs_rq[6]:/.blocked_load_avg
    549.00 ±  4%     -11.5%     486.00 ±  2%  sched_debug.cfs_rq[6]:/.tg->runnable_avg
    927.25 ± 71%    +240.7%       3158 ±  6%  sched_debug.cfs_rq[6]:/.tg_load_contrib
      4572 ± 22%     -49.6%       2303 ± 27%  sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
     -1634 ±-23%     +55.2%      -2535 ±-19%  sched_debug.cfs_rq[7]:/.spread0
    551.00 ±  4%     -11.4%     488.25 ±  3%  sched_debug.cfs_rq[7]:/.tg->runnable_avg
     98.00 ± 22%     -49.7%      49.25 ± 27%  sched_debug.cfs_rq[7]:/.tg_runnable_contrib
     -9609 ± -7%     +10.0%     -10571 ± -1%  sched_debug.cpu#0.nr_uninterruptible
     15.50 ± 79%     -91.9%       1.25 ±173%  sched_debug.cpu#2.cpu_load[1]
     12.75 ± 58%     -76.5%       3.00 ±117%  sched_debug.cpu#2.cpu_load[2]
     11.75 ± 42%     -70.2%       3.50 ± 95%  sched_debug.cpu#2.cpu_load[3]
     11.00 ± 39%     -68.2%       3.50 ± 82%  sched_debug.cpu#2.cpu_load[4]
    851076 ±155%     -93.9%      52140 ± 38%  sched_debug.cpu#3.nr_switches
      1395 ±  4%      -8.7%       1274 ±  1%  sched_debug.cpu#3.nr_uninterruptible
    851137 ±155%     -93.9%      52218 ± 38%  sched_debug.cpu#3.sched_count
    418288 ±157%     -94.6%      22436 ± 44%  sched_debug.cpu#3.sched_goidle
      6.00 ±100%    +150.0%      15.00 ± 30%  sched_debug.cpu#4.cpu_load[2]
      5.25 ± 76%    +157.1%      13.50 ± 19%  sched_debug.cpu#4.cpu_load[3]
      5.25 ± 72%    +123.8%      11.75 ± 20%  sched_debug.cpu#4.cpu_load[4]
      1507 ±  5%     +23.3%       1859 ±  5%  sched_debug.cpu#5.nr_uninterruptible
    811411 ±  8%     +10.4%     895772 ±  6%  sched_debug.cpu#6.avg_idle
      1349 ± 13%     +38.2%       1863 ±  3%  sched_debug.cpu#6.nr_uninterruptible

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
  nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/xfs/8K/400M/fsyncBeforeClose/16d/256fpd

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
     85071 ± 14%     -33.4%      56662 ±  0%  fsmark.time.involuntary_context_switches
     44.50 ±  2%     +12.9%      50.25 ±  0%  fsmark.time.percent_of_cpu_this_job_got
   1173823 ±  2%     +25.4%    1472245 ±  6%  latency_stats.sum.down.xfs_buf_lock._xfs_buf_find.xfs_buf_get_map.xfs_buf_read_map.xfs_trans_read_buf_map.xfs_read_agi.xfs_ialloc_read_agi.xfs_dialloc.xfs_ialloc.xfs_dir_ialloc.xfs_create
     16393 ±  0%    +224.5%      53190 ±112%  softirqs.TIMER
     36.11 ±  2%    +148.9%      89.88 ± 94%  uptime.boot
    178.57 ±  4%    +241.2%     609.30 ±111%  uptime.idle
    178335 ±  0%     -80.3%      35149 ±  2%  vmstat.system.cs
     77433 ±  0%     -93.5%       5027 ±  2%  vmstat.system.in
     28135 ±  3%     -12.1%      24722 ±  1%  meminfo.Active(anon)
     27784 ±  3%     -12.3%      24365 ±  1%  meminfo.AnonPages
     14863 ±  2%     -14.8%      12659 ±  2%  meminfo.Mapped
      6993 ±  3%     -11.9%       6160 ±  1%  proc-vmstat.nr_active_anon
      6906 ±  3%     -12.0%       6075 ±  1%  proc-vmstat.nr_anon_pages
      3703 ±  2%     -14.9%       3152 ±  2%  proc-vmstat.nr_mapped
     85071 ± 14%     -33.4%      56662 ±  0%  time.involuntary_context_switches
     44.50 ±  2%     +12.9%      50.25 ±  0%  time.percent_of_cpu_this_job_got
      5.87 ±  1%     +13.5%       6.67 ±  1%  time.system_time
     10.71 ±  1%     -27.2%       7.79 ±  0%  turbostat.%Busy
    357.25 ±  1%     -34.9%     232.50 ±  0%  turbostat.Avg_MHz
      3333 ±  0%     -10.5%       2984 ±  0%  turbostat.Bzy_MHz
     48.21 ±  5%     -23.5%      36.86 ±  4%  turbostat.CPU%c1
     32.52 ±  5%     +22.7%      39.91 ±  5%  turbostat.CPU%c3
      8.56 ± 11%     +80.3%      15.43 ±  5%  turbostat.CPU%c6
  18315930 ±  4%     -46.6%    9777154 ±  8%  cpuidle.C1-NHM.time
   1153863 ±  2%     -94.6%      62163 ±  3%  cpuidle.C1-NHM.usage
     73216 ±  3%     +10.4%      80802 ±  3%  cpuidle.C3-NHM.usage
  22540985 ±  6%     +26.9%   28610584 ±  4%  cpuidle.C6-NHM.time
     10006 ±  8%     +10.7%      11072 ±  3%  cpuidle.C6-NHM.usage
     43036 ± 99%     -98.5%     641.00 ± 24%  cpuidle.POLL.time
     14491 ±104%     -99.6%      51.50 ± 21%  cpuidle.POLL.usage
     17223 ± 25%     -42.3%       9931 ± 35%  sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
      2435 ±  2%     -10.7%       2174 ±  2%  sched_debug.cfs_rq[0]:/.tg->runnable_avg
    379.00 ± 25%     -42.8%     216.75 ± 34%  sched_debug.cfs_rq[0]:/.tg_runnable_contrib
      2432 ±  2%     -10.7%       2172 ±  2%  sched_debug.cfs_rq[1]:/.tg->runnable_avg
     12047 ± 12%     +26.4%      15233 ±  4%  sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
      1122 ± 11%     +18.6%       1331 ±  4%  sched_debug.cfs_rq[2]:/.min_vruntime
     -2608 ± -9%     -16.4%      -2180 ±-12%  sched_debug.cfs_rq[2]:/.spread0
      2436 ±  2%     -10.9%       2170 ±  2%  sched_debug.cfs_rq[2]:/.tg->runnable_avg
    262.50 ± 12%     +27.0%     333.50 ±  5%  sched_debug.cfs_rq[2]:/.tg_runnable_contrib
      2435 ±  2%     -10.7%       2173 ±  2%  sched_debug.cfs_rq[3]:/.tg->runnable_avg
      2050 ±120%    +731.3%      17041 ± 16%  sched_debug.cfs_rq[4]:/.blocked_load_avg
      2433 ±  1%     -10.3%       2181 ±  2%  sched_debug.cfs_rq[4]:/.tg->runnable_avg
      2073 ±121%    +731.2%      17235 ± 16%  sched_debug.cfs_rq[4]:/.tg_load_contrib
      1043 ± 19%     -35.6%     672.06 ± 20%  sched_debug.cfs_rq[5]:/.min_vruntime
      2433 ±  1%     -10.3%       2184 ±  2%  sched_debug.cfs_rq[5]:/.tg->runnable_avg
      2433 ±  1%     -10.2%       2185 ±  2%  sched_debug.cfs_rq[6]:/.tg->runnable_avg
     13519 ± 30%     -40.0%       8114 ± 35%  sched_debug.cfs_rq[7]:/.blocked_load_avg
      2429 ±  1%     -10.1%       2185 ±  2%  sched_debug.cfs_rq[7]:/.tg->runnable_avg
     13871 ± 30%     -39.9%       8331 ± 35%  sched_debug.cfs_rq[7]:/.tg_load_contrib
    353549 ±  9%     +66.8%     589619 ± 40%  sched_debug.cpu#0.avg_idle
     21206 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#0.clock
     21206 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#0.clock_task
     21207 ±  3%    +253.8%      75035 ±113%  sched_debug.cpu#1.clock
     21207 ±  3%    +253.8%      75035 ±113%  sched_debug.cpu#1.clock_task
     21205 ±  3%    +253.9%      75035 ±113%  sched_debug.cpu#2.clock
     21205 ±  3%    +253.9%      75035 ±113%  sched_debug.cpu#2.clock_task
      5275 ± 21%     +95.3%      10300 ± 35%  sched_debug.cpu#2.nr_switches
      5280 ± 21%     +95.4%      10319 ± 35%  sched_debug.cpu#2.sched_count
      2298 ± 24%    +108.5%       4792 ± 37%  sched_debug.cpu#2.sched_goidle
      2377 ± 31%     +96.9%       4680 ± 34%  sched_debug.cpu#2.ttwu_count
    748.00 ± 47%    +284.9%       2879 ± 48%  sched_debug.cpu#2.ttwu_local
     21208 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#3.clock
     21208 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#3.clock_task
     21206 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#4.clock
     21206 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#4.clock_task
     73956 ±163%     -96.5%       2581 ± 41%  sched_debug.cpu#4.nr_switches
     73962 ±163%     -96.5%       2600 ± 42%  sched_debug.cpu#4.sched_count
     36498 ±165%     -97.4%     950.25 ± 60%  sched_debug.cpu#4.sched_goidle
    507768 ± 26%     +65.5%     840493 ± 20%  sched_debug.cpu#5.avg_idle
     21207 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#5.clock
     21207 ±  3%    +253.8%      75034 ±113%  sched_debug.cpu#5.clock_task
     44.75 ± 62%     -84.4%       7.00 ± 81%  sched_debug.cpu#5.cpu_load[1]
    779.25 ± 42%     +33.1%       1037 ± 34%  sched_debug.cpu#5.nr_load_updates
     21207 ±  3%    +253.8%      75035 ±113%  sched_debug.cpu#6.clock
     21207 ±  3%    +253.8%      75035 ±113%  sched_debug.cpu#6.clock_task
      1995 ± 11%     +21.6%       2427 ± 17%  sched_debug.cpu#6.nr_switches
      2001 ± 11%     +22.3%       2446 ± 17%  sched_debug.cpu#6.sched_count
     21206 ±  3%    +253.8%      75035 ±113%  sched_debug.cpu#7.clock
     21206 ±  3%    +253.8%      75035 ±113%  sched_debug.cpu#7.clock_task
     21207 ±  3%    +253.8%      75036 ±113%  sched_debug.cpu_clk
     21049 ±  3%    +255.7%      74876 ±113%  sched_debug.ktime
     21207 ±  3%    +253.8%      75036 ±113%  sched_debug.sched_clk

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
  lituya/ftq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/powersave/100%/20x/100000ss

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
      7572 ±  0%      +6.2%       8040 ±  0%  ftq.counts
      0.18 ±  2%     -82.4%       0.03 ±  8%  ftq.stddev
   1737203 ±  0%     -99.1%      15898 ± 18%  ftq.time.involuntary_context_switches
      1467 ±  0%      +3.4%       1517 ±  0%  ftq.time.percent_of_cpu_this_job_got
    547.01 ±  0%      +3.5%     566.08 ±  0%  ftq.time.user_time
     16734 ±  0%     -13.5%      14475 ±  0%  meminfo.Mapped
      4181 ±  0%     -13.3%       3624 ±  0%  proc-vmstat.nr_mapped
      1.21 ±  3%     -53.7%       0.56 ±  2%  turbostat.CPU%c1
      5.76 ±  3%     +14.6%       6.61 ±  1%  turbostat.CPU%c6
     97309 ±  0%     -96.9%       2991 ±  2%  vmstat.system.cs
     62011 ±  0%     -76.5%      14573 ±  0%  vmstat.system.in
   1737203 ±  0%     -99.1%      15898 ± 18%  time.involuntary_context_switches
      2.07 ±  6%     -47.5%       1.09 ±  2%  time.system_time
    655.75 ± 36%     +55.8%       1021 ±  5%  time.voluntary_context_switches
   1917711 ± 27%     -91.5%     163688 ± 12%  cpuidle.C1-HSW.time
    144241 ±  3%     -99.6%     608.50 ± 13%  cpuidle.C1-HSW.usage
     13.25 ± 38%     -92.5%       1.00 ±100%  cpuidle.POLL.time
      7.00 ± 30%     -92.9%       0.50 ±100%  cpuidle.POLL.usage
      3330 ±  2%     -12.4%       2918 ±  2%  sched_debug.cfs_rq[0]:/.tg->runnable_avg
     48305 ± 10%     +46.6%      70802 ±  3%  sched_debug.cfs_rq[0]:/.tg_load_avg
      1737 ± 74%    +479.4%      10066 ± 59%  sched_debug.cfs_rq[10]:/.blocked_load_avg
      3330 ±  2%     -12.3%       2922 ±  1%  sched_debug.cfs_rq[10]:/.tg->runnable_avg
     47674 ± 10%     +46.5%      69861 ±  3%  sched_debug.cfs_rq[10]:/.tg_load_avg
      1812 ± 73%    +457.2%      10098 ± 59%  sched_debug.cfs_rq[10]:/.tg_load_contrib
     -4849 ± -2%     -17.4%      -4006 ±-17%  sched_debug.cfs_rq[11]:/.spread0
      3330 ±  2%     -12.3%       2922 ±  1%  sched_debug.cfs_rq[11]:/.tg->runnable_avg
     47674 ± 10%     +46.5%      69861 ±  3%  sched_debug.cfs_rq[11]:/.tg_load_avg
      3330 ±  2%     -12.0%       2930 ±  1%  sched_debug.cfs_rq[12]:/.tg->runnable_avg
     47674 ± 10%     +46.4%      69806 ±  3%  sched_debug.cfs_rq[12]:/.tg_load_avg
      3330 ±  2%     -12.0%       2930 ±  1%  sched_debug.cfs_rq[13]:/.tg->runnable_avg
     47674 ± 10%     +46.4%      69806 ±  3%  sched_debug.cfs_rq[13]:/.tg_load_avg
      3338 ±  2%     -12.2%       2930 ±  1%  sched_debug.cfs_rq[14]:/.tg->runnable_avg
     47612 ± 10%     +46.6%      69806 ±  3%  sched_debug.cfs_rq[14]:/.tg_load_avg
     13486 ± 65%     -66.1%       4567 ± 44%  sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
      3347 ±  2%     -12.5%       2930 ±  1%  sched_debug.cfs_rq[15]:/.tg->runnable_avg
     47536 ± 10%     +46.8%      69806 ±  3%  sched_debug.cfs_rq[15]:/.tg_load_avg
    295.00 ± 67%     -66.2%      99.75 ± 45%  sched_debug.cfs_rq[15]:/.tg_runnable_contrib
      3329 ±  2%     -12.4%       2917 ±  2%  sched_debug.cfs_rq[1]:/.tg->runnable_avg
     48268 ± 10%     +46.7%      70802 ±  3%  sched_debug.cfs_rq[1]:/.tg_load_avg
    611.00 ±164%    +895.1%       6080 ± 65%  sched_debug.cfs_rq[2]:/.blocked_load_avg
      3328 ±  2%     -13.0%       2897 ±  1%  sched_debug.cfs_rq[2]:/.tg->runnable_avg
     48268 ± 10%     +45.8%      70372 ±  3%  sched_debug.cfs_rq[2]:/.tg_load_avg
    611.00 ±164%    +961.3%       6484 ± 69%  sched_debug.cfs_rq[2]:/.tg_load_contrib
      2088 ± 22%     -30.6%       1448 ±  2%  sched_debug.cfs_rq[3]:/.min_vruntime
      3328 ±  2%     -13.0%       2897 ±  1%  sched_debug.cfs_rq[3]:/.tg->runnable_avg
     48268 ± 10%     +45.8%      70372 ±  3%  sched_debug.cfs_rq[3]:/.tg_load_avg
      3330 ±  2%     -12.8%       2902 ±  1%  sched_debug.cfs_rq[4]:/.tg->runnable_avg
     48037 ± 10%     +46.3%      70285 ±  3%  sched_debug.cfs_rq[4]:/.tg_load_avg
      3321 ±  2%     -12.5%       2905 ±  1%  sched_debug.cfs_rq[5]:/.tg->runnable_avg
     48034 ± 10%     +46.2%      70241 ±  3%  sched_debug.cfs_rq[5]:/.tg_load_avg
      5958 ± 58%     -79.2%       1239 ± 77%  sched_debug.cfs_rq[6]:/.blocked_load_avg
      3321 ±  2%     -12.4%       2909 ±  1%  sched_debug.cfs_rq[6]:/.tg->runnable_avg
     48034 ± 10%     +46.2%      70222 ±  3%  sched_debug.cfs_rq[6]:/.tg_load_avg
      6017 ± 57%     -79.4%       1239 ± 77%  sched_debug.cfs_rq[6]:/.tg_load_contrib
     -4384 ±-20%     -23.6%      -3350 ±-37%  sched_debug.cfs_rq[7]:/.spread0
      3321 ±  2%     -12.4%       2909 ±  1%  sched_debug.cfs_rq[7]:/.tg->runnable_avg
     47777 ± 10%     +47.0%      70222 ±  3%  sched_debug.cfs_rq[7]:/.tg_load_avg
      6303 ± 42%     +54.5%       9736 ± 14%  sched_debug.cfs_rq[8]:/.avg->runnable_avg_sum
    909.28 ± 36%     +91.1%       1738 ± 39%  sched_debug.cfs_rq[8]:/.min_vruntime
      3330 ±  2%     -12.5%       2914 ±  1%  sched_debug.cfs_rq[8]:/.tg->runnable_avg
     47674 ± 10%     +47.2%      70180 ±  3%  sched_debug.cfs_rq[8]:/.tg_load_avg
    137.75 ± 43%     +52.8%     210.50 ± 14%  sched_debug.cfs_rq[8]:/.tg_runnable_contrib
     99.27 ± 18%     +50.2%     149.07 ± 22%  sched_debug.cfs_rq[9]:/.exec_clock
      3330 ±  2%     -12.3%       2922 ±  1%  sched_debug.cfs_rq[9]:/.tg->runnable_avg
     47674 ± 10%     +46.5%      69861 ±  3%  sched_debug.cfs_rq[9]:/.tg_load_avg
     27.00 ± 43%     -55.6%      12.00 ±  5%  sched_debug.cpu#0.cpu_load[3]
    889905 ±  9%     -24.5%     671951 ± 16%  sched_debug.cpu#1.avg_idle
     10.00 ± 43%     -70.0%       3.00 ±102%  sched_debug.cpu#1.cpu_load[3]
      8.50 ± 52%     -79.4%       1.75 ±102%  sched_debug.cpu#1.cpu_load[4]
      7.75 ± 19%    -108.6%      -0.67 ±-430%  sched_debug.cpu#10.nr_uninterruptible
      2398 ± 82%     -86.6%     321.25 ± 45%  sched_debug.cpu#10.ttwu_count
      1835 ± 95%     -96.5%      64.00 ± 30%  sched_debug.cpu#10.ttwu_local
      1368 ±  8%     +26.8%       1736 ± 18%  sched_debug.cpu#11.nr_switches
      1373 ±  8%     +26.6%       1738 ± 18%  sched_debug.cpu#11.sched_count
    509.75 ± 10%     +35.9%     693.00 ± 22%  sched_debug.cpu#11.sched_goidle
    578.00 ± 30%     +36.0%     786.25 ± 17%  sched_debug.cpu#11.ttwu_count
    334.00 ± 36%    +115.6%     720.25 ± 16%  sched_debug.cpu#13.ttwu_count
    588893 ± 42%     +64.2%     966897 ±  5%  sched_debug.cpu#5.avg_idle
     -4.00 ±-68%    -118.8%       0.75 ±331%  sched_debug.cpu#5.nr_uninterruptible
      2.25 ±164%    -355.6%      -5.75 ±-65%  sched_debug.cpu#7.nr_uninterruptible
    343.25 ± 42%     +86.7%     641.00 ± 43%  sched_debug.cpu#9.ttwu_count

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
  lituya/fwq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/20x/100000ss

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
      0.11 ±  2%     -44.0%       0.06 ±  2%  fwq.stddev
   3229188 ±  0%     -85.9%     455853 ± 11%  fwq.time.involuntary_context_switches
     13780 ±  1%      +5.7%      14566 ±  0%  fwq.time.maximum_resident_set_size
    176058 ± 20%     -31.6%     120345 ±  0%  latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
      0.30 ± 22%     -76.9%       0.07 ±  0%  turbostat.CPU%c1
    484.63 ± 56%     -36.7%     307.00 ±  1%  uptime.idle
     16399 ±  3%     -78.3%       3553 ±  2%  vmstat.system.cs
     22757 ±  3%     -26.2%      16801 ±  0%  vmstat.system.in
      3907 ±  4%     -10.0%       3517 ±  4%  slabinfo.anon_vma.active_objs
      3907 ±  4%     -10.0%       3517 ±  4%  slabinfo.anon_vma.num_objs
      8215 ±  4%      -8.4%       7522 ±  4%  slabinfo.kmalloc-512.num_objs
   3229188 ±  0%     -85.9%     455853 ± 11%  time.involuntary_context_switches
      1791 ±  3%     +10.3%       1976 ±  3%  time.minor_page_faults
      3.12 ±  1%     -72.4%       0.86 ±  6%  time.system_time
   2392395 ±114%     -93.4%     156963 ± 92%  cpuidle.C1-HSW.time
     48618 ± 12%     -99.3%     331.33 ± 14%  cpuidle.C1-HSW.usage
 2.271e+08 ±130%     -82.5%   39690941 ±  3%  cpuidle.C6-HSW.time
      5212 ± 82%     -75.3%       1289 ±  6%  cpuidle.C6-HSW.usage
      6.25 ± 66%    -100.0%       0.00 ±  0%  cpuidle.POLL.time
      2.75 ± 15%    -100.0%       0.00 ±  0%  cpuidle.POLL.usage
    300.50 ± 49%    +108.2%     625.67 ± 33%  sched_debug.cfs_rq[0]:/.blocked_load_avg
     55.25 ±  4%      -7.7%      51.00 ±  4%  sched_debug.cfs_rq[0]:/.load
     58.75 ±  8%     -14.9%      50.00 ±  1%  sched_debug.cfs_rq[0]:/.runnable_load_avg
    364.25 ± 42%     +87.7%     683.67 ± 28%  sched_debug.cfs_rq[0]:/.tg_load_contrib
    912.75 ± 35%     -41.1%     537.33 ± 70%  sched_debug.cfs_rq[10]:/.tg_load_contrib
    792.33 ± 74%    +151.1%       1989 ± 32%  sched_debug.cfs_rq[11]:/.blocked_load_avg
    659.50 ± 92%    +212.5%       2061 ± 30%  sched_debug.cfs_rq[11]:/.tg_load_contrib
    489.50 ± 59%    +158.1%       1263 ± 20%  sched_debug.cfs_rq[12]:/.blocked_load_avg
    544.75 ± 53%    +143.4%       1326 ± 19%  sched_debug.cfs_rq[12]:/.tg_load_contrib
     98.25 ± 86%    +298.3%     391.33 ± 34%  sched_debug.cfs_rq[1]:/.blocked_load_avg
    157.25 ± 57%    +194.9%     463.67 ± 28%  sched_debug.cfs_rq[1]:/.tg_load_contrib
    324.00 ± 68%    +471.2%       1850 ± 22%  sched_debug.cfs_rq[3]:/.blocked_load_avg
    379.75 ± 60%    +402.9%       1909 ± 21%  sched_debug.cfs_rq[3]:/.tg_load_contrib
     67.75 ± 26%     -23.2%      52.00 ±  6%  sched_debug.cfs_rq[5]:/.load
      1586 ± 85%     -64.3%     566.67 ± 46%  sched_debug.cfs_rq[6]:/.blocked_load_avg
      1679 ± 82%     -61.8%     642.00 ± 43%  sched_debug.cfs_rq[6]:/.tg_load_contrib
      1.25 ± 34%    +406.7%       6.33 ± 39%  sched_debug.cfs_rq[7]:/.nr_spread_over
     59.75 ± 12%     -11.3%      53.00 ±  6%  sched_debug.cpu#0.cpu_load[0]
     55.25 ±  4%      -7.7%      51.00 ±  4%  sched_debug.cpu#0.load
    125050 ± 80%     -83.6%      20475 ± 37%  sched_debug.cpu#1.nr_switches
      1.75 ±240%    +776.2%      15.33 ± 13%  sched_debug.cpu#1.nr_uninterruptible
    125107 ± 80%     -83.6%      20557 ± 37%  sched_debug.cpu#1.sched_count
     54622 ± 93%     -78.4%      11825 ± 72%  sched_debug.cpu#1.ttwu_count
     36441 ± 92%     -91.9%       2955 ± 47%  sched_debug.cpu#1.ttwu_local
      7.75 ± 78%     -95.7%       0.33 ±282%  sched_debug.cpu#10.nr_uninterruptible
      7.25 ± 45%     -51.7%       3.50 ± 42%  sched_debug.cpu#12.nr_uninterruptible
      1584 ± 72%   +1888.2%      31493 ±121%  sched_debug.cpu#13.ttwu_count
     12188 ±141%     -91.0%       1100 ± 68%  sched_debug.cpu#4.sched_goidle
     80.00 ± 15%     -27.1%      58.33 ±  7%  sched_debug.cpu#6.cpu_load[3]
     78.00 ± 16%     -27.8%      56.33 ±  4%  sched_debug.cpu#6.cpu_load[4]
    128000 ±128%     -95.1%       6219 ±103%  sched_debug.cpu#8.ttwu_count
    106189 ±165%     -99.7%     357.33 ±  2%  sched_debug.cpu#8.ttwu_local
     32547 ±143%     -89.9%       3291 ± 62%  sched_debug.cpu#9.nr_switches
     32615 ±143%     -89.7%       3352 ± 62%  sched_debug.cpu#9.sched_count
     26785 ± 79%     -85.9%       3781 ± 81%  sched_debug.cpu#9.ttwu_count
      5.94 ±172%    -100.0%       0.00 ± 85%  sched_debug.rt_rq[2]:/.rt_time
      1.89 ±172%     -99.9%       0.00 ± 89%  sched_debug.rt_rq[8]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/runtime/nr_threads/cluster/test:
  lkp-ne02/netperf/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/300s/200%/cs-localhost/SCTP_RR

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
   1233946 ±  1%     -93.3%      83018 ± 13%  netperf.time.involuntary_context_switches
     26623 ±120%     -76.8%       6174 ± 63%  latency_stats.sum.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs4_file_open.[nfsv4].do_dentry_open.vfs_open.path_openat.do_filp_open.do_sys_open
    301360 ±  2%      -8.2%     276612 ±  1%  softirqs.RCU
   1233946 ±  1%     -93.3%      83018 ± 13%  time.involuntary_context_switches
      0.22 ± 12%     -27.0%       0.16 ± 13%  turbostat.CPU%c1
     26675 ±  0%     -32.3%      18052 ±  0%  vmstat.system.in
      9078 ±  5%     +12.7%      10235 ±  3%  slabinfo.vm_area_struct.active_objs
      9128 ±  4%     +12.8%      10298 ±  3%  slabinfo.vm_area_struct.num_objs
   3247591 ± 37%     -42.2%    1877235 ± 23%  cpuidle.C1-NHM.time
    175462 ±  7%     -95.2%       8494 ±  6%  cpuidle.C1-NHM.usage
    863871 ± 11%    -100.0%      63.00 ±  8%  cpuidle.POLL.time
    175219 ± 10%    -100.0%       5.50 ± 20%  cpuidle.POLL.usage
      2731 ±120%    +218.4%       8696 ±  3%  numa-meminfo.node0.Inactive(anon)
     15363 ±  5%      +7.2%      16474 ±  4%  numa-meminfo.node0.SUnreclaim
      6557 ± 50%     -91.3%     570.25 ± 49%  numa-meminfo.node1.Inactive(anon)
      7805 ± 13%     -25.5%       5812 ±  0%  numa-meminfo.node1.Mapped
     18367 ±  2%      -7.4%      17002 ±  5%  numa-meminfo.node1.SReclaimable
    682.50 ±120%    +218.5%       2173 ±  3%  numa-vmstat.node0.nr_inactive_anon
      3840 ±  5%      +7.2%       4118 ±  4%  numa-vmstat.node0.nr_slab_unreclaimable
      1639 ± 50%     -91.3%     142.00 ± 49%  numa-vmstat.node1.nr_inactive_anon
      1950 ± 13%     -25.5%       1452 ±  0%  numa-vmstat.node1.nr_mapped
      4591 ±  2%      -7.4%       4250 ±  5%  numa-vmstat.node1.nr_slab_reclaimable
      1.00 ± 70%    +350.0%       4.50 ± 57%  sched_debug.cfs_rq[12]:/.nr_spread_over
     -1967 ±-3098%   +5332.3%    -106895 ±-79%  sched_debug.cfs_rq[13]:/.spread0
    103.00 ±  5%     +19.4%     123.00 ±  5%  sched_debug.cfs_rq[15]:/.load
     95.75 ± 10%     +16.7%     111.75 ± 10%  sched_debug.cfs_rq[15]:/.runnable_load_avg
     -1514 ±-4117%   +6467.6%     -99452 ±-81%  sched_debug.cfs_rq[15]:/.spread0
   1116022 ±  6%     +11.2%    1240796 ±  4%  sched_debug.cfs_rq[2]:/.MIN_vruntime
   1116022 ±  6%     +11.2%    1240796 ±  4%  sched_debug.cfs_rq[2]:/.max_vruntime
   1084538 ±  9%     +15.0%    1247278 ±  4%  sched_debug.cfs_rq[3]:/.MIN_vruntime
   1084538 ±  9%     +15.0%    1247278 ±  4%  sched_debug.cfs_rq[3]:/.max_vruntime
     12.25 ± 10%     -40.8%       7.25 ± 35%  sched_debug.cfs_rq[5]:/.nr_spread_over
     -3847 ±-1573%   +2484.2%     -99431 ±-80%  sched_debug.cfs_rq[7]:/.spread0
     -2145 ±-139%    +451.8%     -11836 ±-59%  sched_debug.cfs_rq[8]:/.spread0
    119.00 ±  7%     -23.1%      91.50 ± 12%  sched_debug.cpu#0.cpu_load[0]
    105.25 ±  3%     -15.9%      88.50 ±  6%  sched_debug.cpu#0.cpu_load[1]
     99.00 ±  4%     -13.1%      86.00 ±  5%  sched_debug.cpu#0.cpu_load[2]
      1480 ± 21%     -34.8%     965.50 ± 10%  sched_debug.cpu#0.curr->pid
      2943 ± 29%     +28.1%       3770 ±  1%  sched_debug.cpu#0.sched_goidle
      1784 ± 77%     -75.2%     442.50 ±  7%  sched_debug.cpu#13.sched_goidle
     88778 ± 53%     +58.4%     140611 ±  8%  sched_debug.cpu#2.avg_idle
     89.00 ±  6%     +27.2%     113.25 ±  4%  sched_debug.cpu#3.load
      2979 ± 51%     -67.9%     956.75 ± 19%  sched_debug.cpu#7.sched_goidle
      1369 ± 29%     -26.8%       1002 ± 21%  sched_debug.cpu#9.curr->pid

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/runtime/nr_threads/cluster/test:
  lkp-ne02/netperf/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/300s/200%/cs-localhost/TCP_SENDFILE

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
   2033310 ±  0%     -97.2%      57638 ±  4%  netperf.time.involuntary_context_switches
    165110 ± 33%     +47.2%     243029 ± 20%  proc-vmstat.pgalloc_normal
   2033310 ±  0%     -97.2%      57638 ±  4%  time.involuntary_context_switches
      3320 ±  7%     +12.2%       3725 ±  4%  numa-meminfo.node0.KernelStack
      2760 ±  8%     -15.4%       2335 ±  6%  numa-meminfo.node1.KernelStack
     78.25 ± 37%     -78.3%      17.00 ±139%  numa-vmstat.node1.nr_dirtied
     76.25 ± 37%     -78.4%      16.50 ±138%  numa-vmstat.node1.nr_written
      0.22 ±  5%     -35.6%       0.14 ±  8%  turbostat.CPU%c1
      0.53 ±  5%     +16.0%       0.61 ±  0%  turbostat.CPU%c6
     49180 ±  0%     -48.2%      25479 ±  0%  vmstat.system.cs
     27651 ±  0%     -37.2%      17351 ±  0%  vmstat.system.in
   4278144 ±  1%     -22.0%    3335680 ±  0%  latency_stats.hits.sk_wait_data.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom.SyS_recvfrom.entry_SYSCALL_64_fastpath
     44720 ± 36%    +113.9%      95674 ±108%  latency_stats.sum.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
 2.379e+09 ±  0%      -1.2%  2.351e+09 ±  0%  latency_stats.sum.sk_wait_data.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom.SyS_recvfrom.entry_SYSCALL_64_fastpath
   1875754 ±  8%     -75.8%     453014 ± 30%  cpuidle.C1-NHM.time
    149386 ±  1%     -95.2%       7207 ± 45%  cpuidle.C1-NHM.usage
    322.75 ± 51%     -65.1%     112.50 ± 16%  cpuidle.C1E-NHM.usage
     14707 ±165%     -99.7%      42.00 ± 41%  cpuidle.POLL.time
    232.50 ± 82%     -98.6%       3.25 ± 45%  cpuidle.POLL.usage
    106.75 ±  2%     -10.5%      95.50 ±  5%  sched_debug.cfs_rq[12]:/.load
    112.50 ±  7%     -15.8%      94.75 ±  5%  sched_debug.cfs_rq[12]:/.runnable_load_avg
    110.75 ±  3%     -12.0%      97.50 ± 15%  sched_debug.cfs_rq[13]:/.runnable_load_avg
    143.00 ± 98%    +628.1%       1041 ± 47%  sched_debug.cfs_rq[2]:/.blocked_load_avg
    247.75 ± 57%    +366.3%       1155 ± 42%  sched_debug.cfs_rq[2]:/.tg_load_contrib
     42.50 ±158%    +665.3%     325.25 ± 68%  sched_debug.cfs_rq[3]:/.blocked_load_avg
    -75934 ±-49%    -129.8%      22591 ± 76%  sched_debug.cfs_rq[3]:/.spread0
    145.50 ± 46%    +199.0%     435.00 ± 51%  sched_debug.cfs_rq[3]:/.tg_load_contrib
     30.75 ± 31%     +74.8%      53.75 ± 25%  sched_debug.cfs_rq[4]:/.nr_spread_over
    102.75 ±  3%     +10.7%     113.75 ±  6%  sched_debug.cpu#0.cpu_load[0]
      2530 ± 20%     +68.6%       4265 ± 23%  sched_debug.cpu#0.sched_goidle
    350.25 ± 10%     +81.1%     634.25 ± 24%  sched_debug.cpu#10.sched_goidle
    112.00 ±  7%     -14.7%      95.50 ±  8%  sched_debug.cpu#12.cpu_load[0]
    110.25 ±  6%     -11.3%      97.75 ±  5%  sched_debug.cpu#12.cpu_load[1]
    109.50 ±  4%      -8.9%      99.75 ±  4%  sched_debug.cpu#12.cpu_load[2]
    110.75 ±  4%     -12.0%      97.50 ± 11%  sched_debug.cpu#13.cpu_load[1]
    111.25 ±  4%     -11.9%      98.00 ± 10%  sched_debug.cpu#13.cpu_load[2]
    624.00 ± 23%     -32.6%     420.50 ± 16%  sched_debug.cpu#13.sched_goidle
    947672 ± 76%     -77.0%     217667 ±  3%  sched_debug.cpu#15.nr_switches
    947705 ± 76%     -77.0%     217685 ±  3%  sched_debug.cpu#15.sched_count
    592433 ± 65%     -66.0%     201467 ±  1%  sched_debug.cpu#15.ttwu_local
    911.50 ±  8%      +9.5%     998.50 ± 11%  sched_debug.cpu#3.curr->pid
    562814 ± 32%     +41.8%     798162 ± 15%  sched_debug.cpu#4.avg_idle
    277723 ± 12%     -17.9%     227889 ±  5%  sched_debug.cpu#4.nr_switches
    277747 ± 12%     -17.9%     227907 ±  5%  sched_debug.cpu#4.sched_count
    109.00 ±  2%     -11.0%      97.00 ±  0%  sched_debug.cpu#5.load

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/entries:
  snb-drag/tlbflush/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/200%/32x/512

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
     89406 ±  1%      +2.1%      91264 ±  0%  tlbflush.mem_acc_time_thread_ms
     12692 ± 29%     -69.7%       3848 ±  6%  tlbflush.time.involuntary_context_switches
     45262 ± 14%     -20.5%      35996 ± 13%  softirqs.SCHED
     12692 ± 29%     -69.7%       3848 ±  6%  time.involuntary_context_switches
      5023 ± 14%     +24.8%       6271 ±  4%  slabinfo.kmalloc-32.active_objs
      5023 ± 14%     +24.8%       6271 ±  4%  slabinfo.kmalloc-32.num_objs
     62647 ±  4%     -20.7%      49700 ±  4%  vmstat.system.cs
     26516 ±  6%     -24.7%      19964 ±  3%  vmstat.system.in
 1.486e+08 ±  4%     -25.5%  1.108e+08 ±  7%  cpuidle.C1-SNB.time
   9489652 ±  0%     -45.4%    5183155 ±  1%  cpuidle.C1-SNB.usage
     94983 ± 14%     -35.2%      61571 ±  0%  cpuidle.POLL.usage
    424061 ± 57%    -100.0%       0.00 ± -1%  latency_stats.avg.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs_atomic_open.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
    424061 ± 57%    -100.0%       0.00 ± -1%  latency_stats.max.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs_atomic_open.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
    424061 ± 57%    -100.0%       0.00 ± -1%  latency_stats.sum.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs_atomic_open.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
      4767 ± 56%     -94.4%     268.00 ± 30%  sched_debug.cfs_rq[0]:/.load
     17.25 ± 58%    +127.5%      39.25 ± 19%  sched_debug.cfs_rq[3]:/.runnable_load_avg
   8521655 ± 15%     -42.7%    4882199 ± 15%  sched_debug.cpu#0.nr_switches
   8522183 ± 15%     -42.7%    4882721 ± 15%  sched_debug.cpu#0.sched_count
   4225538 ± 15%     -42.7%    2421794 ± 15%  sched_debug.cpu#0.sched_goidle
   4280766 ± 15%     -41.3%    2511288 ± 15%  sched_debug.cpu#0.ttwu_count
   3693688 ± 17%     -48.0%    1919886 ± 18%  sched_debug.cpu#0.ttwu_local
  10474544 ± 12%     -43.9%    5872222 ±  9%  sched_debug.cpu#1.nr_switches
  10474799 ± 12%     -43.9%    5872473 ±  9%  sched_debug.cpu#1.sched_count
   5198778 ± 12%     -43.9%    2917524 ±  9%  sched_debug.cpu#1.sched_goidle
   5265913 ± 12%     -44.2%    2940722 ±  9%  sched_debug.cpu#1.ttwu_count
   4654824 ± 14%     -48.5%    2396748 ± 10%  sched_debug.cpu#1.ttwu_local
      6.50 ± 50%    +138.5%      15.50 ± 25%  sched_debug.cpu#3.cpu_load[1]
      5.25 ± 28%    +114.3%      11.25 ± 25%  sched_debug.cpu#3.cpu_load[2]

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/test:
  nhm-white/unixbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/shell8

commit: 
  a4be9881623375fd126762af65ef18dc8175c68d
  fa58aafc44805ac425d17c6a8082513b5442ce9d

a4be9881623375fd fa58aafc44805ac425d17c6a80 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
   5937622 ±  1%     -26.4%    4369228 ±  0%  unixbench.time.involuntary_context_switches
     51200 ±  3%     +15.0%      58880 ±  4%  meminfo.DirectMap4k
   5937622 ±  1%     -26.4%    4369228 ±  0%  time.involuntary_context_switches
     62839 ±  1%     -28.4%      44966 ±  2%  vmstat.system.cs
     19289 ±  2%     -46.2%      10378 ±  2%  vmstat.system.in
      6.09 ±  0%      +8.9%       6.63 ±  2%  turbostat.CPU%c3
      0.45 ± 10%     +50.6%       0.67 ± 20%  turbostat.Pkg%pc3
      3.17 ±  1%     +62.5%       5.15 ± 47%  turbostat.Pkg%pc6
  45216499 ±  0%     -34.6%   29566388 ±  1%  cpuidle.C1-NHM.time
   1918738 ±  7%     -88.4%     222808 ±  2%  cpuidle.C1-NHM.usage
    220032 ± 10%     -90.2%      21647 ± 11%  cpuidle.POLL.time
     30597 ±  3%     -96.6%       1051 ±  2%  cpuidle.POLL.usage
      1886 ± 45%     +73.6%       3275 ± 36%  sched_debug.cfs_rq[4]:/.utilization_load_avg
    294740 ±  5%     +42.0%     418454 ±  8%  sched_debug.cpu#1.avg_idle
   2624072 ± 63%     -60.8%    1029438 ±  2%  sched_debug.cpu#1.nr_switches
   2624725 ± 63%     -60.8%    1030184 ±  2%  sched_debug.cpu#1.sched_count
   1203729 ± 70%     -66.9%     398300 ±  1%  sched_debug.cpu#1.ttwu_count
    992043 ± 86%     -81.8%     180660 ±  3%  sched_debug.cpu#1.ttwu_local
     15179 ± 13%     -43.3%       8606 ± 20%  sched_debug.cpu#2.curr->pid
   -204.00 ±-22%     -47.5%    -107.00 ±-43%  sched_debug.cpu#2.nr_uninterruptible
    184.75 ± 28%     -28.1%     132.75 ± 13%  sched_debug.cpu#5.nr_uninterruptible
     14010 ± 11%     -20.8%      11095 ± 16%  sched_debug.cpu#7.curr->pid
   2209845 ± 57%     -56.4%     962613 ±  3%  sched_debug.cpu#7.nr_switches
   2210474 ± 57%     -56.4%     963302 ±  3%  sched_debug.cpu#7.sched_count
    575333 ± 61%     -58.4%     239461 ±  1%  sched_debug.cpu#7.sched_goidle
      7.45 ±124%     -99.9%       0.01 ±  3%  sched_debug.rt_rq[5]:/.rt_time
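
The voluntary/involuntary context switch counts above are the scheduler's
per-task counters, presumably gathered via getrusage()-style accounting. To
inspect them directly on a live system, outside of any LKP job, something
like the following works:

        # per-task counters kept by the scheduler
        grep ctxt_switches /proc/self/status
        # GNU time reports the same pair for a whole command
        /usr/bin/time -v sleep 1 2>&1 | grep -i 'context switches'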


lkp-a06: Atom
Memory: 8G

nhm4: Nehalem
Memory: 4G

lituya: Grantley Haswell
Memory: 16G

lkp-ne02: Nehalem-EP
Memory: 5G

snb-drag: Sandy Bridge
Memory: 6G

nhm-white: Nehalem
Memory: 6G



                        aim7.time.voluntary_context_switches

  58000 ++------------------------------------------------------------------+
        |                  *                                                |
  56000 *+               .. +      .*                               .*      |
  54000 ++*..*.*.*..   .*    *.*..*  +  .*     .*.*..*.*.    .*.  .*  +  .*.*
        |           *.*               *.  +  .*          *..*   *.     *.   |
  52000 ++                                 *.                               |
  50000 ++                                                                  |
        |                                                                   |
  48000 ++                                                                  |
  46000 ++                                                                  |
        |                                                                   |
  44000 O+   O   O    O                                                     |
  42000 ++O    O                  O O O                                     |
        |           O   O  O O O         O                                  |
  40000 ++------------------------------------------------------------------+


                        aim7.time.involuntary_context_switches

  2.2e+06 ++----------------------------------------------------------------+
    2e+06 *+*..*.*.*.*..   .*.  .*.*.*..*.*.*.*..*.*.*..*.*.*.*..*.*.*.*..*.*
          |             *.*   *.                                            |
  1.8e+06 ++                                                                |
  1.6e+06 ++                                                                |
  1.4e+06 ++                                                                |
  1.2e+06 ++                                                                |
          |                                                                 |
    1e+06 ++                                                                |
   800000 ++                                                                |
   600000 ++                                                                |
   400000 ++                                                                |
          |                                                                 |
   200000 O+O  O O O O  O O O O  O O O  O O                                 |
        0 ++----------------------------------------------------------------+


        [*] bisect-good sample
        [O] bisect-bad  sample

To reproduce:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run     job.yaml
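
To reproduce the comparison above, the job needs one run on a kernel built
from each of the two commits. A rough sketch of that outer loop follows; the
kernel tree path is a placeholder, and the build, install and boot steps are
environment-specific and omitted:

        for c in a4be9881623375fd126762af65ef18dc8175c68d \
                 fa58aafc44805ac425d17c6a8082513b5442ce9d; do
                git -C /path/to/linux checkout "$c"  # placeholder tree path
                # ... build, install and boot into the resulting kernel ...
                bin/lkp run job.yaml
        done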


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
default-watchdogs:
  oom-killer: 
  watchdog: 
commit: 4334e59ef28396425833e152edf40c40e58ccb84
model: Atom
memory: 8G
hdd_partitions: "/dev/disk/by-id/ata-TOSHIBA_MK8061GSYB_22FHP0NGT-part1"
swap_partitions: 
rootfs_partition: "/dev/disk/by-id/ata-TOSHIBA_MK8061GSYB_22FHP0NGT-part2"
testcase: aim7
aim7:
  load: 4000
  test: pipe_cpy
queue: cyclic
testbox: lkp-a06
tbox_group: lkp-a06
kconfig: x86_64-rhel
enqueue_time: 2015-08-13 15:19:11.193423027 +08:00
id: 34aa50e279ab0f513acb7f6abd3a572fe64787a7
user: lkp
compiler: gcc-4.9
head_commit: 4334e59ef28396425833e152edf40c40e58ccb84
base_commit: f7644cbfcdf03528f0f450f3940c4985b2291f49
branch: linux-devel/devel-hourly-2015081302
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/4334e59ef28396425833e152edf40c40e58ccb84/vmlinuz-4.2.0-rc6-wl-ath-03840-g4334e59"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/aim7/4000-pipe_cpy/lkp-a06/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4334e59ef28396425833e152edf40c40e58ccb84/0"
job_file: "/lkp/scheduled/lkp-a06/cyclic_aim7-4000-pipe_cpy-x86_64-rhel-CYCLIC_HEAD-4334e59ef28396425833e152edf40c40e58ccb84-20150813-16309-gx9rp8-0.yaml"
dequeue_time: 2015-08-13 16:29:29.826691265 +08:00
nr_cpu: "$(nproc)"
max_uptime: 3600
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-a06/cyclic_aim7-4000-pipe_cpy-x86_64-rhel-CYCLIC_HEAD-4334e59ef28396425833e152edf40c40e58ccb84-20150813-16309-gx9rp8-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015081302
- commit=4334e59ef28396425833e152edf40c40e58ccb84
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/4334e59ef28396425833e152edf40c40e58ccb84/vmlinuz-4.2.0-rc6-wl-ath-03840-g4334e59
- max_uptime=3600
- RESULT_ROOT=/result/aim7/4000-pipe_cpy/lkp-a06/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4334e59ef28396425833e152edf40c40e58ccb84/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/4334e59ef28396425833e152edf40c40e58ccb84/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/lkp/benchmarks/aim7-x86_64.cgz"
job_state: finished
loadavg: 2048.94 721.78 257.37 1/131 4538
start_time: '1439454675'
end_time: '1439454764'
version: "/lkp/lkp/.src-20150813-001640"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
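
The sixteen echo lines above pin every CPU's cpufreq scaling governor to
performance; an equivalent loop form, shown here for clarity only:

        for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
                echo performance > "$g"
        done
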
netserver
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1
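
The 32 identical netperf invocations above correspond to the nr_threads: 200%
setting of the netperf jobs (two clients per CPU, assuming a 16-CPU box);
presumably they are launched concurrently, which in plain shell would look
like:

        for i in $(seq 32); do
                netperf -t TCP_SENDFILE -c -C -l 300 -H 127.0.0.1 &
        done
        wait    # all clients run for the full 300 s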
