FYI, we noticed the following changes on https://github.com/0day-ci/linux, branch Philippe-Longepe/intel_pstate-Use-the-cpu-load-to-determine-the-PercentPerformance/20151103-173647, commit 10e4d2e8d38473d3964068469dd03f8c69fca4e2 ("intel_pstate: Use the cpu load to determine the PercentPerformance")
========================================================================================= tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test: lituya/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/powersave/page_fault2 commit: 1ab68460b1d0671968b35e04f21efcf1ce051916 10e4d2e8d38473d3964068469dd03f8c69fca4e2 1ab68460b1d06719 10e4d2e8d38473d3964068469d ---------------- -------------------------- %stddev %change %stddev \ | \ 298605 ± 0% -13.9% 257084 ± 0% will-it-scale.per_thread_ops 6070 ± 3% -12.0% 5344 ± 4% will-it-scale.time.involuntary_context_switches 2082000 ± 0% -4.4% 1990194 ± 4% will-it-scale.time.maximum_resident_set_size 2.431e+08 ± 0% -27.8% 1.755e+08 ± 0% will-it-scale.time.minor_page_faults 317.00 ± 0% -15.8% 267.00 ± 0% will-it-scale.time.percent_of_cpu_this_job_got 929.86 ± 0% -17.9% 763.32 ± 0% will-it-scale.time.system_time 38.72 ± 0% +36.8% 52.96 ± 1% will-it-scale.time.user_time 160590 ± 0% -30.9% 110986 ± 0% will-it-scale.time.voluntary_context_switches 82310 ± 4% -10.1% 74007 ± 2% softirqs.RCU 76219 ± 1% -11.4% 67558 ± 2% softirqs.SCHED 21099 ± 0% -12.8% 18403 ± 0% vmstat.system.cs 17755 ± 0% -9.1% 16139 ± 0% vmstat.system.in 50721188 ± 11% -24.3% 38373487 ± 7% cpuidle.C1-HSW.time 2915214 ± 0% -12.7% 2544540 ± 0% cpuidle.C1-HSW.usage 1349 ± 7% -43.6% 761.75 ± 16% cpuidle.C1E-HSW.usage 1016 ± 10% -29.3% 718.75 ± 14% cpuidle.C3-HSW.usage 209143 ± 0% -22.2% 162794 ± 0% cpuidle.C6-HSW.usage 134.00 ± 21% +342.4% 592.75 ± 13% cpuidle.POLL.usage 5.851e+08 ± 0% -11.6% 5.172e+08 ± 0% proc-vmstat.numa_hit 5.851e+08 ± 0% -11.6% 5.172e+08 ± 0% proc-vmstat.numa_local 84352492 ± 0% -11.0% 75114262 ± 0% proc-vmstat.pgalloc_dma32 5.008e+08 ± 0% -11.7% 4.421e+08 ± 0% proc-vmstat.pgalloc_normal 5.813e+08 ± 0% -11.6% 5.136e+08 ± 0% proc-vmstat.pgfault 5.843e+08 ± 0% -11.6% 5.165e+08 ± 0% proc-vmstat.pgfree 6070 ± 3% -12.0% 5344 ± 4% time.involuntary_context_switches 2.431e+08 ± 0% -27.8% 1.755e+08 ± 0% time.minor_page_faults 317.00 ± 
0% -15.8% 267.00 ± 0% time.percent_of_cpu_this_job_got 929.86 ± 0% -17.9% 763.32 ± 0% time.system_time 38.72 ± 0% +36.8% 52.96 ± 1% time.user_time 160590 ± 0% -30.9% 110986 ± 0% time.voluntary_context_switches 49.45 ± 0% -6.1% 46.46 ± 0% turbostat.%Busy 1628 ± 0% -24.9% 1223 ± 0% turbostat.Avg_MHz 3292 ± 0% -20.1% 2632 ± 0% turbostat.Bzy_MHz 30.06 ± 0% +12.9% 33.94 ± 0% turbostat.CPU%c6 39.00 ± 3% -18.6% 31.75 ± 1% turbostat.CoreTmp 44.25 ± 0% -11.9% 39.00 ± 0% turbostat.PkgTmp 60.82 ± 0% -21.9% 47.48 ± 0% turbostat.PkgWatt 9.46 ± 1% -12.3% 8.30 ± 0% turbostat.RAMWatt 114.00 ± 6% -22.8% 88.00 ± 32% sched_debug.cfs_rq[0]:/.load_avg 114.75 ± 6% -22.4% 89.00 ± 31% sched_debug.cfs_rq[0]:/.tg_load_avg_contrib 747.75 ± 4% -20.2% 597.00 ± 13% sched_debug.cfs_rq[0]:/.util_avg 35.00 ± 7% -22.1% 27.25 ± 11% sched_debug.cfs_rq[10]:/.load_avg 35.00 ± 7% -22.1% 27.25 ± 11% sched_debug.cfs_rq[10]:/.tg_load_avg_contrib 65687 ± 9% -16.3% 55002 ± 10% sched_debug.cfs_rq[13]:/.exec_clock 796846 ± 7% -13.6% 688111 ± 9% sched_debug.cfs_rq[13]:/.min_vruntime 645.50 ± 3% -6.9% 601.00 ± 4% sched_debug.cfs_rq[13]:/.tg_load_avg 646.00 ± 3% -6.5% 604.25 ± 4% sched_debug.cfs_rq[14]:/.tg_load_avg 53581 ± 21% -34.5% 35083 ± 33% sched_debug.cfs_rq[15]:/.exec_clock 642502 ± 22% -32.0% 436623 ± 30% sched_debug.cfs_rq[15]:/.min_vruntime 645.75 ± 3% -6.4% 604.50 ± 4% sched_debug.cfs_rq[15]:/.tg_load_avg 675242 ± 13% -18.5% 550135 ± 2% sched_debug.cfs_rq[2]:/.min_vruntime 21.50 ± 34% -66.3% 7.25 ± 38% sched_debug.cfs_rq[3]:/.nr_spread_over 29.75 ± 24% -26.9% 21.75 ± 3% sched_debug.cfs_rq[3]:/.runnable_load_avg 523.50 ± 15% -21.6% 410.50 ± 19% sched_debug.cfs_rq[4]:/.util_avg 17.50 ± 48% -50.0% 8.75 ± 27% sched_debug.cfs_rq[6]:/.nr_spread_over -306445 ±-46% -69.1% -94668 ±-159% sched_debug.cfs_rq[7]:/.spread0 475.50 ± 11% +14.8% 545.75 ± 12% sched_debug.cfs_rq[7]:/.util_avg 32.25 ± 7% -19.4% 26.00 ± 11% sched_debug.cpu#10.cpu_load[1] 30.50 ± 9% -18.0% 25.00 ± 12% sched_debug.cpu#10.cpu_load[3] 30.25 ± 
10% -17.4% 25.00 ± 12% sched_debug.cpu#10.cpu_load[4] 71304 ± 16% -19.2% 57587 ± 9% sched_debug.cpu#13.nr_load_updates 8607 ± 29% -41.9% 4998 ± 32% sched_debug.cpu#14.ttwu_count 30.50 ± 24% -48.4% 15.75 ± 20% sched_debug.cpu#15.cpu_load[0] 43.50 ± 9% -21.3% 34.25 ± 10% sched_debug.cpu#2.cpu_load[2] 42.50 ± 15% -37.1% 26.75 ± 27% sched_debug.cpu#3.cpu_load[0] 43.00 ± 14% -34.9% 28.00 ± 20% sched_debug.cpu#3.cpu_load[1] 42.75 ± 13% -34.5% 28.00 ± 20% sched_debug.cpu#3.cpu_load[2] 42.50 ± 12% -34.7% 27.75 ± 18% sched_debug.cpu#3.cpu_load[3] 42.00 ± 10% -34.5% 27.50 ± 18% sched_debug.cpu#3.cpu_load[4] 37.75 ± 13% -26.5% 27.75 ± 27% sched_debug.cpu#4.cpu_load[4] 28.00 ± 37% +40.2% 39.25 ± 6% sched_debug.cpu#7.cpu_load[0] 1131 ± 16% +41.7% 1602 ± 19% sched_debug.cpu#7.curr->pid 42073 ± 30% +56.9% 66014 ± 6% sched_debug.cpu#7.nr_load_updates 4725 ± 34% -30.5% 3284 ± 6% sched_debug.cpu#8.sched_goidle lituya: Grantley Haswell Memory: 16G turbostat.Avg_MHz 1650 ++-----------*-**-*-*-*-**-*-----------------------------------------+ *.*.**.*.*.* *.*.*.**.*.*.*.**.*.*.*.**.*.*.*.*.**.*.* 1600 ++ | 1550 ++ | | | 1500 ++ O O O | 1450 O+O O O O O O O O O OO O O O O OO | | | 1400 ++ | 1350 ++ | | | 1300 ++ | 1250 ++ | | O O O | 1200 ++------------------------------------O------------------------------+ turbostat._Busy 50 ++-------------------------------------------------------------------+ | .*.**.*.*.*.**.*. | 49.5 *+*.* .*.*.* *.*.*.**.*.*.*.**.*.*.*.**.*.*.*.*.**.*.* 49 ++ * | | | 48.5 ++ | | | 48 ++ O | O O OO O O O O O O O | 47.5 ++ O O O O O O O OO | 47 ++ | | | 46.5 ++ O O O | | O | 46 ++-------------------------------------------------------------------+ turbostat.PkgWatt 64 ++---------------------------------------------------------------------+ | .* * | 62 ++*.*. *.*.*. .*. .**. 
.*.*.*.*.*.** + .*.*.* + .*.*.*.*.* .*.*.* 60 *+ * * *.* * *.* *.* * | | | 58 ++ | 56 ++ | O O OO O O OO O O O O O O | 54 ++O O O O O O O | 52 ++ | | | 50 ++ | 48 ++ | | O O O O | 46 ++---------------------------------------------------------------------+ will-it-scale.per_thread_ops 305000 ++-----------------------------------------------------------------+ 300000 ++ .*.*.*. | *.*.* *.*.**.*. *. .*.** **.*.*.**.*.*.**.*.*.**.*.*.**.*.* 295000 ++ : : *.* * | 290000 ++ : : | | : : | 285000 O+ :: | 280000 ++O OO:O O O OO O | 275000 ++ :: O O O OO O O O OO | | : O | 270000 ++ * | 265000 ++ | | | 260000 ++ OO | 255000 ++-----------------------------------O-O---------------------------+ will-it-scale.time.system_time 940 ++--------------------------------------------------------------------+ *.*.*.**.*.*.*.*.**.*.*.*.*.**.*.*.*.*.*.**.*.*.*.*.**.*.*.*.*.**.*.*.* 920 ++ | 900 ++ | | | 880 ++ | 860 ++ | O OO | 840 ++O O O O O O O O O O OO O O O O | 820 ++ O O | | | 800 ++ | 780 ++ | | | 760 ++-------------------------------------O-OO-O-------------------------+ will-it-scale.time.percent_of_cpu_this_job_got 320 ++--*------------**-*---*-*-*----*------------------------------------+ *.* **.*.*.*.* * *.* *.*.*.**.*.*.*.*.**.*.*.*.*.**.*.*.* 310 ++ | | | | | 300 ++ | | | 290 O+ O OO O O O O | | O O O OO O O O OO O O O | 280 ++ | | | | | 270 ++ O O O | | O | 260 ++--------------------------------------------------------------------+ will-it-scale.time.minor_page_faults 2.5e+08 ++----------------------------------------------------------------+ *.**. .*.**. 
.*.**.*.**.*.*.**.*.*.**.*.*.**.*.*.**.* 2.4e+08 ++ * *.*.**.*.*.**.* | 2.3e+08 ++ | | | 2.2e+08 ++ | O | 2.1e+08 ++OO O O O OO O O OO O O OO O O | | O O O | 2e+08 ++ | 1.9e+08 ++ | | | 1.8e+08 ++ | | O O O O | 1.7e+08 ++----------------------------------------------------------------+ will-it-scale.time.voluntary_context_switches 170000 ++-----------------------------------------------------------------+ | | 160000 *+*.**.*.*.* .*.*.*.**.*.*.**.*.*.**.*.*.**.*.*.**.*.* | *.*.*.**.*.*.** | | | 150000 ++ | | | 140000 O+ OO O | | O O O OO O O OO O O O OO | 130000 ++ O O O | | | | | 120000 ++ | | | 110000 ++-----------------------------------O-O-OO------------------------+ time.system_time 940 ++--------------------------------------------------------------------+ *.*.*.**.*.*.*.*.**.*.*.*.*.**.*.*.*.*.*.**.*.*.*.*.**.*.*.*.*.**.*.*.* 920 ++ | 900 ++ | | | 880 ++ | 860 ++ | O OO | 840 ++O O O O O O O O O O OO O O O O | 820 ++ O O | | | 800 ++ | 780 ++ | | | 760 ++-------------------------------------O-OO-O-------------------------+ time.percent_of_cpu_this_job_got 320 ++--*------------**-*---*-*-*----*------------------------------------+ *.* **.*.*.*.* * *.* *.*.*.**.*.*.*.*.**.*.*.*.*.**.*.*.* 310 ++ | | | | | 300 ++ | | | 290 O+ O OO O O O O | | O O O OO O O O OO O O O | 280 ++ | | | | | 270 ++ O O O | | O | 260 ++--------------------------------------------------------------------+ time.minor_page_faults 2.5e+08 ++----------------------------------------------------------------+ *.**. .*.**. 
.*.**.*.**.*.*.**.*.*.**.*.*.**.*.*.**.* 2.4e+08 ++ * *.*.**.*.*.**.* | 2.3e+08 ++ | | | 2.2e+08 ++ | O | 2.1e+08 ++OO O O O OO O O OO O O OO O O | | O O O | 2e+08 ++ | 1.9e+08 ++ | | | 1.8e+08 ++ | | O O O O | 1.7e+08 ++----------------------------------------------------------------+ time.voluntary_context_switches 170000 ++-----------------------------------------------------------------+ | | 160000 *+*.**.*.*.* .*.*.*.**.*.*.**.*.*.**.*.*.**.*.*.**.*.* | *.*.*.**.*.*.** | | | 150000 ++ | | | 140000 O+ OO O | | O O O OO O O OO O O O OO | 130000 ++ O O O | | | | | 120000 ++ | | | 110000 ++-----------------------------------O-O-OO------------------------+ [*] bisect-good sample [O] bisect-bad sample To reproduce: git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git cd lkp-tests bin/lkp install job.yaml # job file is attached in this email bin/lkp run job.yaml Disclaimer: Results have been estimated based on internal Intel analysis and are provided for informational purposes only. Any difference in system hardware or software design or configuration may affect actual performance. Thanks, Ying Huang
--- LKP_SERVER: inn LKP_CGI_PORT: 80 LKP_CIFS_PORT: 139 testcase: will-it-scale default-monitors: wait: activate-monitor kmsg: uptime: iostat: vmstat: numa-numastat: numa-vmstat: numa-meminfo: proc-vmstat: proc-stat: interval: 10 meminfo: slabinfo: interrupts: lock_stat: latency_stats: softirqs: bdi_dev_mapping: diskstats: nfsstat: cpuidle: cpufreq-stats: turbostat: pmeter: sched_debug: interval: 60 cpufreq_governor: powersave default-watchdogs: oom-killer: watchdog: commit: fe77e83e9cb25d71fce9de8782d6d75c63145d68 model: Grantley Haswell nr_cpu: 16 memory: 16G hdd_partitions: swap_partitions: rootfs_partition: category: benchmark perf-profile: freq: 800 will-it-scale: test: page_fault2 queue: cyclic testbox: lituya tbox_group: lituya kconfig: x86_64-rhel enqueue_time: 2015-11-07 08:04:55.596421079 +08:00 id: cd8fb8ca5316f6746c21d702bac20042856023e2 user: lkp compiler: gcc-4.9 head_commit: fe77e83e9cb25d71fce9de8782d6d75c63145d68 base_commit: 6a13feb9c82803e2b815eca72fa7a9f5561d7861 branch: linux-devel/devel-hourly-2015110812 kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/fe77e83e9cb25d71fce9de8782d6d75c63145d68/vmlinuz-4.3.0-bochs-virtio-gpu-wl-ath-13338-gfe77e83" rootfs: debian-x86_64-2015-02-07.cgz result_root: "/result/will-it-scale/powersave-page_fault2/lituya/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/fe77e83e9cb25d71fce9de8782d6d75c63145d68/0" job_file: "/lkp/scheduled/lituya/cyclic_will-it-scale-powersave-page_fault2-x86_64-rhel-CYCLIC_HEAD-fe77e83e9cb25d71fce9de8782d6d75c63145d68-20151107-41532-2wu2r3-0.yaml" dequeue_time: 2015-11-08 19:55:34.445916281 +08:00 max_uptime: 1500 initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz" bootloader_append: - root=/dev/ram0 - user=lkp - job=/lkp/scheduled/lituya/cyclic_will-it-scale-powersave-page_fault2-x86_64-rhel-CYCLIC_HEAD-fe77e83e9cb25d71fce9de8782d6d75c63145d68-20151107-41532-2wu2r3-0.yaml - ARCH=x86_64 - kconfig=x86_64-rhel - branch=linux-devel/devel-hourly-2015110812 - 
commit=fe77e83e9cb25d71fce9de8782d6d75c63145d68 - BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/fe77e83e9cb25d71fce9de8782d6d75c63145d68/vmlinuz-4.3.0-bochs-virtio-gpu-wl-ath-13338-gfe77e83 - max_uptime=1500 - RESULT_ROOT=/result/will-it-scale/powersave-page_fault2/lituya/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/fe77e83e9cb25d71fce9de8782d6d75c63145d68/0 - LKP_SERVER=inn - |2- earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz" modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/fe77e83e9cb25d71fce9de8782d6d75c63145d68/modules.cgz" bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/will-it-scale.cgz" job_state: finished loadavg: 13.24 6.73 2.70 1/220 4882 start_time: '1446983758' end_time: '1446984063' version: "/lkp/lkp/.src-20151106-190021"
# Reproduce script: select the "powersave" cpufreq governor on every CPU
# of the 16-core test box, then launch the will-it-scale page_fault2
# benchmark via its runner.
#
# CPUs are written in the same (lexicographic) order as the generated
# original: cpu0, cpu1, cpu10..cpu15, cpu2..cpu9.
for cpu in 0 1 10 11 12 13 14 15 2 3 4 5 6 7 8 9
do
	echo powersave > /sys/devices/system/cpu/cpu${cpu}/cpufreq/scaling_governor
done

# Run the page_fault2 test case (32 iterations, both process and thread
# modes) at 1, 8, 12 and 16 parallel tasks.
./runtest.py page_fault2 32 both 1 8 12 16