FYI, we noticed the following changes on

https://github.com/0day-ci/linux 
Waiman-Long/locking-mutex-Enable-optimistic-spinning-of-lock-waiter/20160210-035352
commit 526743800284f9bf1c3487754abdd0a04689986a ("locking/mutex: Enable 
optimistic spinning of woken task in wait queue")


=========================================================================================
compiler/cpufreq_governor/disk/filenum/fs/iomode/kconfig/nr_threads/period/rootfs/rwmode/size/tbox_group/testcase:
  
gcc-4.9/performance/1HDD/1024f/xfs/sync/x86_64-rhel/100%/600s/debian-x86_64-2015-02-07.cgz/seqrewr/64G/lkp-sb02/fileio

commit: 
  a97b24f27907ff7078dde5d8b162890657e9cffa
  526743800284f9bf1c3487754abdd0a04689986a

a97b24f27907ff70 526743800284f9bf1c3487754a 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
      0.02 ±  0%  +82950.0%      16.61 ±  0%  fileio.request_latency_95%_ms
      4409 ±  1%     +38.9%       6126 ±  2%  
fileio.time.involuntary_context_switches
      6.00 ±  0%     +33.3%       8.00 ±  0%  
fileio.time.percent_of_cpu_this_job_got
     36.06 ±  0%     +43.0%      51.55 ±  0%  fileio.time.system_time
   1828660 ±  0%     -92.5%     137258 ±  0%  
fileio.time.voluntary_context_switches
      6776 ±  3%     -83.3%       1134 ±  2%  vmstat.system.cs
    965.80 ±  3%   +1699.5%      17379 ±  0%  
latency_stats.avg.xfs_file_buffered_aio_write.xfs_file_write_iter.__vfs_write.vfs_write.SyS_pwrite64.entry_SYSCALL_64_fastpath
   1775334 ±  0%     -94.4%      98980 ±  0%  
latency_stats.hits.xfs_file_buffered_aio_write.xfs_file_write_iter.__vfs_write.vfs_write.SyS_pwrite64.entry_SYSCALL_64_fastpath
      2854 ±  1%     +28.3%       3663 ±  4%  
proc-vmstat.kswapd_high_wmark_hit_quickly
      2686 ±  5%     -31.5%       1839 ±  8%  
proc-vmstat.kswapd_low_wmark_hit_quickly
      4409 ±  1%     +38.9%       6126 ±  2%  time.involuntary_context_switches
      6.00 ±  0%     +33.3%       8.00 ±  0%  time.percent_of_cpu_this_job_got
     36.06 ±  0%     +43.0%      51.55 ±  0%  time.system_time
      1.61 ±  8%     +30.9%       2.11 ± 10%  time.user_time
   1828660 ±  0%     -92.5%     137258 ±  0%  time.voluntary_context_switches
 8.132e+08 ±  2%     -99.6%    2980944 ± 15%  cpuidle.C1-SNB.time
   1017420 ±  1%     -99.4%       6236 ±  7%  cpuidle.C1-SNB.usage
  15400570 ±  5%     -29.7%   10828903 ±  9%  cpuidle.C1E-SNB.time
     23282 ±  4%     -25.5%      17339 ±  4%  cpuidle.C1E-SNB.usage
  10079742 ±  6%     -34.3%    6626480 ±  5%  cpuidle.C3-SNB.time
     11636 ±  2%     -25.9%       8620 ±  6%  cpuidle.C3-SNB.usage
 9.165e+08 ± 12%    +158.8%  2.372e+09 ±  4%  cpuidle.C6-SNB.time
    215963 ±  3%     +11.9%     241655 ±  1%  cpuidle.C6-SNB.usage
 6.391e+08 ±  3%     -99.7%    1916334 ± 17%  cpuidle.POLL.time
    737818 ±  2%     -92.0%      59379 ±  8%  cpuidle.POLL.usage
     28.32 ±  5%     -89.4%       2.99 ±  3%  turbostat.%Busy
    846.40 ±  5%     -89.9%      85.11 ±  3%  turbostat.Avg_MHz
     34.14 ±  4%     -97.0%       1.02 ±  3%  turbostat.CPU%c1
      0.48 ±  7%     -29.2%       0.34 ±  4%  turbostat.CPU%c3
     37.05 ±  7%    +158.2%      95.65 ±  0%  turbostat.CPU%c6
     17.13 ±  4%     -85.2%       2.53 ±  2%  turbostat.CorWatt
      0.83 ± 40%   +4818.5%      40.92 ±  3%  turbostat.Pkg%pc2
      0.12 ± 11%    +199.4%       0.36 ±  8%  turbostat.Pkg%pc3
      1.96 ±181%   +2442.6%      49.76 ±  2%  turbostat.Pkg%pc6
     20.71 ±  3%     -70.8%       6.04 ±  1%  turbostat.PkgWatt
      9162 ±  3%     +23.4%      11309 ±  3%  sched_debug.cfs_rq:/.exec_clock.0
      6844 ±  3%     +31.9%       9030 ±  2%  sched_debug.cfs_rq:/.exec_clock.1
      6768 ±  4%     +31.1%       8870 ±  2%  sched_debug.cfs_rq:/.exec_clock.2
      6805 ±  4%     +30.7%       8892 ±  3%  sched_debug.cfs_rq:/.exec_clock.3
      7395 ±  2%     +28.8%       9525 ±  2%  
sched_debug.cfs_rq:/.exec_clock.avg
      9163 ±  3%     +23.4%      11309 ±  3%  
sched_debug.cfs_rq:/.exec_clock.max
      6562 ±  2%     +33.2%       8738 ±  2%  
sched_debug.cfs_rq:/.exec_clock.min
     28.21 ±  9%     +23.6%      34.85 ± 10%  sched_debug.cfs_rq:/.load_avg.avg
     15905 ±  6%     +17.9%      18747 ±  3%  
sched_debug.cfs_rq:/.min_vruntime.0
     14555 ±  6%     +18.1%      17190 ±  2%  
sched_debug.cfs_rq:/.min_vruntime.2
     14783 ± 10%     +23.9%      18316 ±  7%  
sched_debug.cfs_rq:/.min_vruntime.3
     15027 ±  2%     +17.8%      17708 ±  2%  
sched_debug.cfs_rq:/.min_vruntime.avg
     16764 ±  3%     +15.1%      19297 ±  4%  
sched_debug.cfs_rq:/.min_vruntime.max
     13379 ±  3%     +22.5%      16396 ±  2%  
sched_debug.cfs_rq:/.min_vruntime.min
     82.40 ± 13%     +27.6%     105.11 ± 13%  sched_debug.cfs_rq:/.util_avg.3
     84.55 ±  4%     +12.4%      95.06 ±  4%  sched_debug.cfs_rq:/.util_avg.avg
    691995 ±  5%     +12.5%     778781 ±  1%  sched_debug.cpu.avg_idle.avg
    430798 ± 20%     +45.9%     628519 ±  2%  sched_debug.cpu.avg_idle.min
    181201 ± 16%     -37.0%     114212 ±  8%  sched_debug.cpu.avg_idle.stddev
    533948 ±  5%     -80.7%     102858 ±  6%  sched_debug.cpu.nr_switches.0
    534186 ±  7%     -81.0%     101537 ±  9%  sched_debug.cpu.nr_switches.1
    544165 ± 10%     -82.8%      93401 ±  8%  sched_debug.cpu.nr_switches.2
    563662 ±  7%     -83.6%      92575 ± 10%  sched_debug.cpu.nr_switches.3
    543991 ±  2%     -82.1%      97593 ±  3%  sched_debug.cpu.nr_switches.avg
    602486 ±  4%     -81.7%     110318 ±  5%  sched_debug.cpu.nr_switches.max
    494168 ±  6%     -82.5%      86298 ±  6%  sched_debug.cpu.nr_switches.min
     40961 ± 45%     -77.8%       9113 ± 35%  sched_debug.cpu.nr_switches.stddev
    525594 ±  5%     -82.1%      94195 ±  7%  sched_debug.cpu.sched_count.0
    525022 ±  7%     -82.6%      91341 ±  9%  sched_debug.cpu.sched_count.1
    535427 ± 10%     -84.0%      85485 ±  9%  sched_debug.cpu.sched_count.2
    556754 ±  7%     -84.4%      86819 ± 10%  sched_debug.cpu.sched_count.3
    535700 ±  2%     -83.3%      89461 ±  3%  sched_debug.cpu.sched_count.avg
    594074 ±  4%     -82.9%     101334 ±  5%  sched_debug.cpu.sched_count.max
    485951 ±  6%     -83.7%      79152 ±  6%  sched_debug.cpu.sched_count.min
     40932 ± 44%     -79.6%       8357 ± 33%  sched_debug.cpu.sched_count.stddev
    260676 ±  5%     -82.9%      44493 ±  7%  sched_debug.cpu.sched_goidle.0
    260318 ±  7%     -83.5%      43008 ±  9%  sched_debug.cpu.sched_goidle.1
    265623 ± 10%     -84.8%      40286 ±  9%  sched_debug.cpu.sched_goidle.2
    276372 ±  7%     -85.2%      41011 ± 11%  sched_debug.cpu.sched_goidle.3
    265748 ±  2%     -84.1%      42200 ±  3%  sched_debug.cpu.sched_goidle.avg
    294884 ±  4%     -83.7%      48058 ±  5%  sched_debug.cpu.sched_goidle.max
    240878 ±  6%     -84.6%      37148 ±  6%  sched_debug.cpu.sched_goidle.min
     20456 ± 44%     -79.9%       4114 ± 33%  
sched_debug.cpu.sched_goidle.stddev
    266102 ±  5%     -83.0%      45259 ± 13%  sched_debug.cpu.ttwu_count.0
    271621 ±  8%     -84.2%      42969 ±  9%  sched_debug.cpu.ttwu_count.1
    268563 ± 11%     -83.5%      44288 ± 16%  sched_debug.cpu.ttwu_count.2
    259619 ±  8%     -84.0%      41599 ± 12%  sched_debug.cpu.ttwu_count.3
    266477 ±  2%     -83.7%      43529 ±  3%  sched_debug.cpu.ttwu_count.avg
    292475 ±  6%     -82.4%      51569 ±  9%  sched_debug.cpu.ttwu_count.max
    237977 ±  4%     -84.6%      36626 ±  6%  sched_debug.cpu.ttwu_count.min
     20654 ± 49%     -72.1%       5763 ± 34%  sched_debug.cpu.ttwu_count.stddev


lkp-sb02: Sandy Bridge-EP
Memory: 4G

To reproduce:

        git clone 
git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run     job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fileio
default-monitors:
  wait: activate-monitor
  kmsg: 
  uptime: 
  iostat: 
  vmstat: 
  numa-numastat: 
  numa-vmstat: 
  numa-meminfo: 
  proc-vmstat: 
  proc-stat:
    interval: 10
  meminfo: 
  slabinfo: 
  interrupts: 
  lock_stat: 
  latency_stats: 
  softirqs: 
  bdi_dev_mapping: 
  diskstats: 
  nfsstat: 
  cpuidle: 
  cpufreq-stats: 
  turbostat: 
  pmeter: 
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer: 
  watchdog: 
commit: 526743800284f9bf1c3487754abdd0a04689986a
model: Sandy Bridge-EP
memory: 4G
hdd_partitions: "/dev/disk/by-id/ata-HDT722516DLA380_VDSD1DTCEKYAEJ-part2"
swap_partitions: 
rootfs_partition: "/dev/disk/by-id/ata-HDT722516DLA380_VDSD1DTCEKYAEJ-part1"
category: benchmark
period: 600s
nr_threads: 100%
disk: 1HDD
fs: xfs
size: 64G
fileio:
  filenum: 1024f
  rwmode: seqrewr
  iomode: sync
queue: bisect
testbox: lkp-sb02
tbox_group: lkp-sb02
kconfig: x86_64-rhel
enqueue_time: 2016-02-21 01:54:41.226790092 +08:00
id: 66b56d4041f263311a010d005bb91c35b48932a4
user: lkp
compiler: gcc-4.9
head_commit: 40ab400dacd1bd3ffbdeb8aa078e2885bdd3cfd5
base_commit: 18558cae0272f8fd9647e69d3fec1565a7949865
branch: linux-devel/devel-hourly-2016021912
rootfs: debian-x86_64-2015-02-07.cgz
result_root: 
"/result/fileio/performance-600s-100%-1HDD-xfs-64G-1024f-seqrewr-sync/lkp-sb02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/526743800284f9bf1c3487754abdd0a04689986a/0"
job_file: 
"/lkp/scheduled/lkp-sb02/bisect_fileio-performance-600s-100%-1HDD-xfs-64G-1024f-seqrewr-sync-debian-x86_64-2015-02-07.cgz-x86_64-rhel-526743800284f9bf1c3487754abdd0a04689986a-20160221-119225-15rq60y-0.yaml"
nr_cpu: "$(nproc)"
max_uptime: 1849.42
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- 
job=/lkp/scheduled/lkp-sb02/bisect_fileio-performance-600s-100%-1HDD-xfs-64G-1024f-seqrewr-sync-debian-x86_64-2015-02-07.cgz-x86_64-rhel-526743800284f9bf1c3487754abdd0a04689986a-20160221-119225-15rq60y-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2016021912
- commit=526743800284f9bf1c3487754abdd0a04689986a
- 
BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/526743800284f9bf1c3487754abdd0a04689986a/vmlinuz-4.5.0-rc3-00302-g5267438
- max_uptime=1849
- 
RESULT_ROOT=/result/fileio/performance-600s-100%-1HDD-xfs-64G-1024f-seqrewr-sync/lkp-sb02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/526743800284f9bf1c3487754abdd0a04689986a/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 
prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: 
"/pkg/linux/x86_64-rhel/gcc-4.9/526743800284f9bf1c3487754abdd0a04689986a/modules.cgz"
bm_initrd: 
"/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fileio.cgz"
linux_headers_initrd: 
"/pkg/linux/x86_64-rhel/gcc-4.9/526743800284f9bf1c3487754abdd0a04689986a/linux-headers.cgz"
repeat_to: 5
kernel: 
"/pkg/linux/x86_64-rhel/gcc-4.9/526743800284f9bf1c3487754abdd0a04689986a/vmlinuz-4.5.0-rc3-00302-g5267438"
dequeue_time: 2016-02-21 02:25:29.321271380 +08:00
job_state: finished
loadavg: 5.32 4.87 2.72 1/128 10580
start_time: '1455992765'
end_time: '1455993367'
version: "/lkp/lkp/.src-20160220-225840"

Attachment: reproduce.sh
Description: Bourne shell script

Reply via email to the sender.