FYI, we noticed a +41.4% change in will-it-scale.time.percent_of_cpu_this_job_got with your commit.

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 56a17b8836398ffab52c00dda8442f1680dc251b ("mm: temporarily mark THP 
broken")
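
For context, will-it-scale's malloc1 testcase hammers anonymous memory: each worker repeatedly allocates a large buffer, faults it in, and frees it, so throughput is dominated by page-fault and mmap_sem behavior. A rough sketch of the loop's shape (the buffer size and harness interface below are illustrative assumptions, not the actual test source):

    /* Sketch of a will-it-scale-style malloc1 worker loop.
     * Illustrative only: SZ and the testcase() interface are
     * assumptions; see the will-it-scale sources for the real test. */
    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define SZ (128UL * 1024 * 1024)  /* large enough that glibc mmap()s it */

    void testcase(unsigned long long *iterations)
    {
            for (;;) {
                    char *buf = malloc(SZ);  /* mmap() an anonymous region */
                    assert(buf);
                    /* Touch every page: one minor fault per 4KB page with
                     * THP disabled, versus per 2MB region with THP on. */
                    memset(buf, 0, SZ);
                    free(buf);               /* munmap() the region again */
                    (*iterations)++;
            }
    }

With THP marked broken, each iteration takes far more minor faults and more mmap_sem round-trips, which is what the minor_page_faults, voluntary_context_switches, and call_rwsem_* numbers below reflect.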


=========================================================================================
compiler/cpufreq_governor/kconfig/rootfs/tbox_group/test/testcase:
  gcc-4.9/performance/x86_64-rhel/debian-x86_64-2015-02-07.cgz/lkp-sbx04/malloc1/will-it-scale

commit: 
  122afea9626ab3f717b250a8dd3d5ebf57cdb56c
  56a17b8836398ffab52c00dda8442f1680dc251b

122afea9626ab3f7 56a17b8836398ffab52c00dda8 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
     78370 ±  0%      +1.6%      79638 ±  0%  will-it-scale.per_process_ops
     39117 ±  0%      +7.3%      41971 ±  0%  will-it-scale.per_thread_ops
     36425 ±  6%     -74.4%       9327 ±  0%  will-it-scale.time.maximum_resident_set_size
   7633416 ±  0%     +60.8%   12272010 ±  0%  will-it-scale.time.minor_page_faults
     67.00 ±  1%     +41.4%      94.75 ±  0%  will-it-scale.time.percent_of_cpu_this_job_got
    200.68 ±  0%     +38.5%     278.00 ±  0%  will-it-scale.time.system_time
      7.88 ±  2%     +98.5%      15.65 ±  1%  will-it-scale.time.user_time
   3902893 ±  5%    +302.9%   15723910 ±  0%  will-it-scale.time.voluntary_context_switches
    150273 ±  4%     +34.7%     202476 ±  8%  softirqs.RCU
     27428 ±  4%    +277.4%     103508 ±  0%  vmstat.system.cs
     20080 ±  3%     +93.3%      38820 ±  1%  vmstat.system.in
     48628 ±  2%     -15.6%      41041 ±  0%  meminfo.Active(anon)
      7893 ± 15%    -100.0%       0.00 ± -1%  meminfo.AnonHugePages
     36334 ±  3%     -21.2%      28619 ±  0%  meminfo.AnonPages
     33788 ±  0%     +17.9%      39840 ±  0%  numa-vmstat.node0.numa_interleave
      3801 ± 31%     -46.6%       2028 ± 36%  numa-vmstat.node1.nr_active_anon
    326.25 ±  3%     -15.9%     274.50 ±  2%  numa-vmstat.node1.nr_alloc_batch
      3693 ± 10%     -49.3%       1870 ± 28%  numa-vmstat.node2.nr_anon_pages
      0.94 ±  5%     +11.7%       1.05 ±  3%  perf-profile.cycles.___might_sleep.kmem_cache_alloc.mmap_region.do_mmap.vm_mmap_pgoff
      1.18 ±  2%     -11.3%       1.04 ±  1%  perf-profile.cycles.___might_sleep.unmap_page_range.unmap_single_vma.unmap_vmas.unmap_region
      1.03 ±  1%      -7.5%       0.95 ±  4%  perf-profile.cycles.__alloc_pages_nodemask.alloc_pages_current.pte_alloc_one.__pte_alloc.handle_mm_fault
      1.14 ±  1%      -7.7%       1.05 ±  3%  perf-profile.cycles.alloc_pages_current.pte_alloc_one.__pte_alloc.handle_mm_fault.__do_page_fault
   1647719 ±  3%    +271.3%    6117589 ±  0%  latency_stats.hits.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault
   1356059 ±  5%    +315.7%    5636837 ±  0%  latency_stats.hits.call_rwsem_down_write_failed.vm_munmap.SyS_munmap.entry_SYSCALL_64_fastpath
      8333 ± 21%     -75.6%       2032 ±  8%  latency_stats.sum.call_rwsem_down_read_failed.do_exit.do_group_exit.SyS_exit_group.entry_SYSCALL_64_fastpath
     93349 ± 11%     -71.7%      26423 ±  7%  latency_stats.sum.call_rwsem_down_read_failed.do_exit.do_group_exit.get_signal.do_signal.exit_to_usermode_loop.prepare_exit_to_usermode.retint_user
     92440 ± 30%     -69.1%      28576 ± 17%  latency_stats.sum.call_rwsem_down_read_failed.do_exit.do_group_exit.get_signal.do_signal.exit_to_usermode_loop.syscall_return_slowpath.int_ret_from_sys_call
    152444 ±  4%     -67.8%      49052 ±  3%  latency_stats.sum.call_rwsem_down_write_failed.SyS_mprotect.entry_SYSCALL_64_fastpath
     36425 ±  6%     -74.4%       9327 ±  0%  time.maximum_resident_set_size
   7633416 ±  0%     +60.8%   12272010 ±  0%  time.minor_page_faults
     67.00 ±  1%     +41.4%      94.75 ±  0%  time.percent_of_cpu_this_job_got
    200.68 ±  0%     +38.5%     278.00 ±  0%  time.system_time
      7.88 ±  2%     +98.5%      15.65 ±  1%  time.user_time
   3902893 ±  5%    +302.9%   15723910 ±  0%  time.voluntary_context_switches
    983.75 ± 31%    -100.0%       0.00 ± -1%  numa-meminfo.node0.AnonHugePages
     39941 ± 10%     -18.2%      32681 ±  9%  numa-meminfo.node1.Active
     15187 ± 31%     -46.5%       8119 ± 36%  numa-meminfo.node1.Active(anon)
    676.00 ± 66%    -100.0%       0.00 ± -1%  numa-meminfo.node1.AnonHugePages
      5403 ± 20%    -100.0%       0.00 ± -1%  numa-meminfo.node2.AnonHugePages
     14772 ± 10%     -49.3%       7486 ± 28%  numa-meminfo.node2.AnonPages
    880.75 ± 48%    -100.0%       0.00 ± -1%  numa-meminfo.node3.AnonHugePages
     12167 ±  2%     -15.7%      10260 ±  0%  proc-vmstat.nr_active_anon
      1193 ±  0%     -11.6%       1054 ±  1%  proc-vmstat.nr_alloc_batch
      9094 ±  3%     -21.3%       7155 ±  0%  proc-vmstat.nr_anon_pages
  49373529 ±  1%     -29.1%   35001216 ±  0%  proc-vmstat.pgalloc_dma32
 2.245e+08 ±  0%     -15.2%  1.905e+08 ±  0%  proc-vmstat.pgalloc_normal
 2.739e+08 ±  0%     -17.7%  2.255e+08 ±  0%  proc-vmstat.pgfree
    113422 ±  3%    -100.0%       0.00 ± -1%  proc-vmstat.thp_fault_alloc
    113402 ±  3%    -100.0%       0.00 ± -1%  proc-vmstat.thp_split_page
     22.63 ±  0%      +3.0%      23.32 ±  0%  turbostat.%Busy
    652.50 ±  0%      +2.4%     668.00 ±  0%  turbostat.Avg_MHz
     27.60 ±  7%     +50.1%      41.43 ±  0%  turbostat.CPU%c1
      0.75 ± 21%     -97.7%       0.02 ± 47%  turbostat.CPU%c3
     49.02 ±  3%     -28.1%      35.23 ±  0%  turbostat.CPU%c7
    121.98 ±  0%      +6.2%     129.50 ±  0%  turbostat.CorWatt
     37.39 ±  2%     -19.0%      30.27 ±  1%  turbostat.Pkg%pc2
    176.87 ±  0%      +4.2%     184.25 ±  0%  turbostat.PkgWatt
  70837840 ± 23%    +157.4%  1.823e+08 ±  6%  cpuidle.C1-SNB.time
    352446 ±111%   +1438.3%    5421651 ±  3%  cpuidle.C1-SNB.usage
  68045347 ±  7%    +114.8%  1.462e+08 ±  1%  cpuidle.C1E-SNB.time
    152018 ± 29%    +932.9%    1570128 ±  9%  cpuidle.C1E-SNB.usage
  36961996 ± 23%    +550.6%  2.405e+08 ±  5%  cpuidle.C3-SNB.time
     87756 ± 24%    +762.1%     756587 ±  4%  cpuidle.C3-SNB.usage
   3624705 ±  3%    +168.4%    9729062 ±  0%  cpuidle.C7-SNB.usage
  34086233 ± 14%     +30.4%   44449058 ± 13%  cpuidle.POLL.time
     71342 ±  5%     +92.0%     136968 ±  3%  cpuidle.POLL.usage
     55973 ±  3%     +11.6%      62441 ±  2%  sched_debug.cpu.nr_load_updates.avg
     23002 ±  3%     +14.0%      26212 ±  1%  sched_debug.cpu.nr_load_updates.stddev
     56335 ±  8%    +286.9%     217982 ±  0%  sched_debug.cpu.nr_switches.avg
    224611 ± 21%    +358.7%    1030323 ±  3%  sched_debug.cpu.nr_switches.max
      6852 ±  4%    +124.6%      15390 ± 26%  sched_debug.cpu.nr_switches.min
     57323 ± 21%    +405.8%     289971 ±  2%  sched_debug.cpu.nr_switches.stddev
    155999 ±  2%    +104.1%     318341 ±  0%  sched_debug.cpu.sched_count.avg
      7095 ±  5%    +120.1%      15617 ± 27%  sched_debug.cpu.sched_count.min
     27854 ±  8%    +290.2%     108690 ±  0%  sched_debug.cpu.sched_goidle.avg
    111721 ± 21%    +360.8%     514811 ±  3%  sched_debug.cpu.sched_goidle.max
      3125 ±  6%    +140.9%       7528 ± 27%  sched_debug.cpu.sched_goidle.min
     28656 ± 21%    +406.0%     145007 ±  2%  sched_debug.cpu.sched_goidle.stddev
     28531 ±  8%    +289.8%     111208 ±  0%  sched_debug.cpu.ttwu_count.avg
    113428 ± 20%    +362.5%     524580 ±  3%  sched_debug.cpu.ttwu_count.max
      2957 ± 17%    +152.7%       7472 ± 22%  sched_debug.cpu.ttwu_count.min
     29259 ± 20%    +403.8%     147410 ±  2%  sched_debug.cpu.ttwu_count.stddev
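
A note on the headline metric: percent_of_cpu_this_job_got is GNU time's "Percent of CPU this job got", computed as (user_time + system_time) / wall_time * 100. With the post-commit values above and the ~309s run recorded below (end_time - start_time), that is (278.00 + 15.65) / 309 ≈ 95%, consistent with the reported 94.75%. The extra CPU is mostly system time spent servicing the additional minor faults and rwsem waits once THP is disabled.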


lkp-sbx04: Sandy Bridge-EX
Memory: 64G



To reproduce:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run     job.yaml
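
(The job.yaml referenced above is the job file attached to this email; the YAML after the "---" separator below records the same job parameters plus run metadata.)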


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Xiaolong Ye
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: will-it-scale
default-monitors:
  wait: activate-monitor
  kmsg: 
  uptime: 
  iostat: 
  heartbeat: 
  vmstat: 
  numa-numastat: 
  numa-vmstat: 
  numa-meminfo: 
  proc-vmstat: 
  proc-stat:
    interval: 10
  meminfo: 
  slabinfo: 
  interrupts: 
  lock_stat: 
  latency_stats: 
  softirqs: 
  bdi_dev_mapping: 
  diskstats: 
  nfsstat: 
  cpuidle: 
  cpufreq-stats: 
  turbostat: 
  pmeter: 
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer: 
  watchdog: 
commit: 56a17b8836398ffab52c00dda8442f1680dc251b
model: Sandy Bridge-EX
nr_cpu: 64
memory: 64G
nr_ssd_partitions: 7
ssd_partitions: "/dev/disk/by-id/ata-INTEL_SSDSC2*-part1"
swap_partitions: 
category: benchmark
perf-profile:
  freq: 800
will-it-scale:
  test: malloc1
queue: bisect
testbox: lkp-sbx04
tbox_group: lkp-sbx04
kconfig: x86_64-rhel
enqueue_time: 2016-03-27 18:21:40.653443075 +08:00
compiler: gcc-4.9
rootfs: debian-x86_64-2015-02-07.cgz
id: 829a57740569556c5361c8da6ed2a4162b8b8104
user: lkp
head_commit: ac7362c9a1d0e6cdf0ec2f6e714e214cd0838d13
base_commit: b562e44f507e863c6792946e4e1b1449fbbac85d
branch: internal-eywa/master
result_root: "/result/will-it-scale/performance-malloc1/lkp-sbx04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/56a17b8836398ffab52c00dda8442f1680dc251b/0"
job_file: "/lkp/scheduled/lkp-sbx04/bisect_will-it-scale-performance-malloc1-debian-x86_64-2015-02-07.cgz-x86_64-rhel-56a17b8836398ffab52c00dda8442f1680dc251b-20160327-19362-xb20kp-0.yaml"
max_uptime: 1500
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-sbx04/bisect_will-it-scale-performance-malloc1-debian-x86_64-2015-02-07.cgz-x86_64-rhel-56a17b8836398ffab52c00dda8442f1680dc251b-20160327-19362-xb20kp-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=internal-eywa/master
- commit=56a17b8836398ffab52c00dda8442f1680dc251b
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/56a17b8836398ffab52c00dda8442f1680dc251b/vmlinuz-4.4.0-06411-g56a17b8
- max_uptime=1500
- RESULT_ROOT=/result/will-it-scale/performance-malloc1/lkp-sbx04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/56a17b8836398ffab52c00dda8442f1680dc251b/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/56a17b8836398ffab52c00dda8442f1680dc251b/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/will-it-scale.cgz,/lkp/benchmarks/will-it-scale.cgz,/lkp/benchmarks/will-it-scale-x86_64.cgz"
linux_headers_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/56a17b8836398ffab52c00dda8442f1680dc251b/linux-headers.cgz"
repeat_to: 2
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/56a17b8836398ffab52c00dda8442f1680dc251b/vmlinuz-4.4.0-06411-g56a17b8"
dequeue_time: 2016-03-27 18:38:29.429040255 +08:00
job_state: finished
loadavg: 46.28 20.96 8.19 1/648 11544
start_time: '1459075193'
end_time: '1459075502'
version: "/lkp/lkp/.src-20160325-205817"
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu16/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu17/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu18/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu19/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu20/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu21/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu22/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu23/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu24/cpufreq/scaling_governor
2016-03-27 18:39:51 echo performance > /sys/devices/system/cpu/cpu25/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu26/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu27/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu28/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu29/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu30/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu31/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu32/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu33/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu34/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu35/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu36/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu37/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu38/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu39/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu40/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu41/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu42/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu43/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu44/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu45/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu46/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu47/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu48/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu49/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu50/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu51/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu52/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu53/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu54/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu55/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu56/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu57/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu58/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu59/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu60/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu61/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu62/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu63/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
2016-03-27 18:39:52 echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
2016-03-27 18:39:53 ./runtest.py malloc1 16 both 1 8 16 24 32 48 64
