FYI, we noticed the following changes on

git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 8f2f3eb59dff4ec538de55f2e0592fec85966aab ("fsnotify: fix oops in fsnotify_clear_marks_by_group_flags()")


=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
  lkp-sbx04/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/read1

commit: 
  447f6a95a9c80da7faaec3e66e656eab8f262640
  8f2f3eb59dff4ec538de55f2e0592fec85966aab

447f6a95a9c80da7 8f2f3eb59dff4ec538de55f2e0 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
   1844687 ±  0%      -4.0%    1770899 ±  0%  will-it-scale.per_thread_ops
    283.69 ±  0%      +9.5%     310.64 ±  0%  will-it-scale.time.user_time
      4576 ±  3%      -7.3%       4242 ±  6%  will-it-scale.time.voluntary_context_switches
      7211 ± 10%     +54.0%      11101 ± 18%  cpuidle.C1E-SNB.usage
     10636 ± 36%     +69.3%      18003 ± 36%  numa-meminfo.node1.Shmem
      1.07 ±  4%     -13.1%       0.93 ±  9%  perf-profile.cpu-cycles.selinux_file_permission.security_file_permission.rw_verify_area.vfs_read.sys_read
      4576 ±  3%      -7.3%       4242 ±  6%  time.voluntary_context_switches
    526.75 ±104%     -94.2%      30.50 ± 98%  numa-numastat.node1.other_node
      1540 ± 35%     -74.2%     398.00 ± 90%  numa-numastat.node2.other_node
     32344 ±  5%      +7.4%      34722 ±  4%  numa-vmstat.node0.numa_other
      2658 ± 36%     +69.3%       4500 ± 36%  numa-vmstat.node1.nr_shmem
    935792 ±136%   +4247.3%   40682138 ±141%  latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
    935792 ±136%   +4247.3%   40682138 ±141%  latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
    935792 ±136%   +4247.3%   40682138 ±141%  latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
     12893 ±  2%      -9.1%      11716 ±  1%  slabinfo.kmalloc-192.active_objs
      1653 ±  9%     -10.3%       1483 ±  5%  slabinfo.mnt_cache.active_objs
      1653 ±  9%     -10.3%       1483 ±  5%  slabinfo.mnt_cache.num_objs
      1.75 ± 47%     -81.0%       0.33 ±141%  sched_debug.cfs_rq[10]:/.nr_spread_over
   -343206 ±-27%     -73.2%     -91995 ±-170%  sched_debug.cfs_rq[14]:/.spread0
    533.25 ± 82%     -81.5%      98.75 ± 42%  sched_debug.cfs_rq[18]:/.blocked_load_avg
    541.75 ± 82%     -81.3%     101.25 ± 41%  sched_debug.cfs_rq[18]:/.tg_load_contrib
  -1217705 ± -5%     -30.2%    -850080 ±-15%  sched_debug.cfs_rq[26]:/.spread0
     89722 ±  9%      +9.8%      98495 ± 10%  sched_debug.cfs_rq[32]:/.exec_clock
    101180 ±132%    +180.8%     284154 ± 30%  sched_debug.cfs_rq[35]:/.spread0
     37332 ±473%    +725.2%     308082 ± 59%  sched_debug.cfs_rq[38]:/.spread0
     32054 ±502%    +981.6%     346689 ± 39%  sched_debug.cfs_rq[39]:/.spread0
      1.00 ±100%    +100.0%       2.00 ± 50%  sched_debug.cfs_rq[42]:/.nr_spread_over
   -125980 ±-218%    -307.1%     260875 ± 46%  sched_debug.cfs_rq[42]:/.spread0
   -111501 ±-102%    -288.7%     210354 ± 94%  sched_debug.cfs_rq[45]:/.spread0
   -173363 ±-34%    -221.0%     209775 ± 94%  sched_debug.cfs_rq[47]:/.spread0
   -302090 ±-43%    -121.8%      65953 ±322%  sched_debug.cfs_rq[4]:/.spread0
   -490175 ±-18%     -41.1%    -288722 ±-31%  sched_debug.cfs_rq[50]:/.spread0
   -594948 ±-10%     -59.7%    -239840 ±-33%  sched_debug.cfs_rq[51]:/.spread0
      1.00 ±100%   +6050.0%      61.50 ±141%  sched_debug.cfs_rq[53]:/.blocked_load_avg
     10.50 ±  8%    +614.3%      75.00 ±122%  sched_debug.cfs_rq[53]:/.tg_load_contrib
   -596043 ±-10%     -49.0%    -304277 ±-36%  sched_debug.cfs_rq[54]:/.spread0
     10.00 ±  0%   +2062.5%     216.25 ± 40%  sched_debug.cfs_rq[56]:/.tg_load_contrib
     17.75 ±173%   +1302.8%     249.00 ± 26%  sched_debug.cfs_rq[60]:/.blocked_load_avg
   -809633 ± -9%     -36.2%    -516886 ±-23%  sched_debug.cfs_rq[60]:/.spread0
     28.00 ±109%    +828.6%     260.00 ± 25%  sched_debug.cfs_rq[60]:/.tg_load_contrib
    277.75 ± 95%     -86.3%      38.00 ±171%  sched_debug.cfs_rq[7]:/.blocked_load_avg
    293.25 ± 90%     -81.8%      53.50 ±121%  sched_debug.cfs_rq[7]:/.tg_load_contrib
     17.50 ±  2%     -28.6%      12.50 ± 34%  sched_debug.cpu#0.cpu_load[2]
     17.00 ±  4%     -25.0%      12.75 ± 35%  sched_debug.cpu#0.cpu_load[3]
      2907 ± 12%    +195.9%       8603 ± 63%  sched_debug.cpu#0.sched_goidle
     16.50 ±  3%      -9.1%      15.00 ±  0%  sched_debug.cpu#1.cpu_load[2]
     16.50 ±  3%      -7.6%      15.25 ±  2%  sched_debug.cpu#1.cpu_load[3]
      5595 ± 26%     -36.4%       3557 ± 11%  sched_debug.cpu#11.nr_switches
      6885 ± 92%     -76.2%       1639 ± 40%  sched_debug.cpu#11.ttwu_count
      1350 ± 34%     -55.0%     608.00 ± 14%  sched_debug.cpu#11.ttwu_local
     17892 ± 74%     -78.3%       3877 ± 18%  sched_debug.cpu#12.nr_switches
      1288 ± 27%     -49.8%     647.50 ± 37%  sched_debug.cpu#12.ttwu_local
      1405 ± 22%     -52.7%     664.50 ± 23%  sched_debug.cpu#13.ttwu_local
      1.25 ±182%    -440.0%      -4.25 ±-50%  sched_debug.cpu#17.nr_uninterruptible
      1976 ±  5%     -10.0%       1779 ±  0%  sched_debug.cpu#18.curr->pid
    983.75 ±  8%    +101.6%       1983 ± 32%  sched_debug.cpu#18.ttwu_local
     -0.25 ±-911%   +2300.0%      -6.00 ±-28%  sched_debug.cpu#21.nr_uninterruptible
      2979 ± 49%    +159.6%       7734 ± 75%  sched_debug.cpu#22.ttwu_count
      1111 ± 21%    +127.6%       2528 ± 32%  sched_debug.cpu#22.ttwu_local
      1.00 ±141%    -275.0%      -1.75 ±-84%  sched_debug.cpu#25.nr_uninterruptible
     14419 ± 54%     -58.2%       6022 ± 84%  sched_debug.cpu#25.ttwu_count
     14395 ± 70%    +252.4%      50729 ± 39%  sched_debug.cpu#28.nr_switches
     -4.75 ±-17%    -115.8%       0.75 ±218%  sched_debug.cpu#30.nr_uninterruptible
      2335 ±115%     -76.6%     547.25 ± 18%  sched_debug.cpu#34.ttwu_count
      1258 ± 25%     -43.3%     713.75 ± 11%  sched_debug.cpu#35.nr_switches
      1409 ± 23%     -39.6%     851.75 ±  9%  sched_debug.cpu#35.sched_count
    969.50 ± 69%     -68.8%     302.00 ± 38%  sched_debug.cpu#35.ttwu_count
    382.00 ± 37%     -66.0%     130.00 ± 14%  sched_debug.cpu#35.ttwu_local
    808.75 ± 18%     +28.3%       1037 ± 15%  sched_debug.cpu#38.nr_switches
    948.50 ± 16%     +23.2%       1168 ± 13%  sched_debug.cpu#38.sched_count
     70695 ±  2%      +6.2%      75047 ±  4%  sched_debug.cpu#41.nr_load_updates
      1269 ± 13%     +55.3%       1970 ± 25%  sched_debug.cpu#46.nr_switches
      3.25 ± 93%     -76.9%       0.75 ±197%  sched_debug.cpu#46.nr_uninterruptible
      1375 ± 12%     +51.1%       2078 ± 23%  sched_debug.cpu#46.sched_count
      3958 ± 97%    +462.9%      22281 ± 25%  sched_debug.cpu#50.ttwu_count
    457.25 ± 26%     +64.3%     751.25 ± 28%  sched_debug.cpu#53.ttwu_local
    753041 ±  3%     -11.1%     669815 ±  5%  sched_debug.cpu#58.avg_idle
     -1.75 ±-142%    -257.1%       2.75 ± 64%  sched_debug.cpu#59.nr_uninterruptible
      2581 ± 27%   +1426.4%      39408 ± 57%  sched_debug.cpu#60.nr_switches
      2632 ± 27%   +1400.2%      39495 ± 57%  sched_debug.cpu#60.sched_count
     34156 ± 94%     -94.8%       1776 ± 15%  sched_debug.cpu#61.nr_switches
     34250 ± 94%     -94.7%       1825 ± 15%  sched_debug.cpu#61.sched_count
     16821 ± 96%     -95.4%     768.50 ± 11%  sched_debug.cpu#61.sched_goidle
      8128 ±146%     -91.7%     676.00 ± 10%  sched_debug.cpu#61.ttwu_count

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
  ivb42/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/readseek1

commit: 
  447f6a95a9c80da7faaec3e66e656eab8f262640
  8f2f3eb59dff4ec538de55f2e0592fec85966aab

447f6a95a9c80da7 8f2f3eb59dff4ec538de55f2e0 
---------------- -------------------------- 
         %stddev     %change         %stddev
             \          |                \  
   1915464 ±  0%      -2.4%    1869344 ±  0%  will-it-scale.per_thread_ops
    473.17 ±  0%      +6.9%     505.66 ±  0%  will-it-scale.time.user_time
      0.20 ±  5%     -49.4%       0.10 ± 35%  turbostat.Pkg%pc6
      3.38 ±  0%     +34.0%       4.53 ±  1%  perf-profile.cpu-cycles.find_get_entry.find_lock_entry.shmem_getpage_gfp.shmem_file_read_iter.__vfs_read
      7.42 ±  0%     +16.3%       8.62 ±  1%  perf-profile.cpu-cycles.find_lock_entry.shmem_getpage_gfp.shmem_file_read_iter.__vfs_read.vfs_read
      0.57 ±  6%     +72.2%       0.99 ±  6%  perf-profile.cpu-cycles.radix_tree_lookup_slot.find_get_entry.find_lock_entry.shmem_getpage_gfp.shmem_file_read_iter
     10.58 ±  0%     +11.4%      11.79 ±  1%  perf-profile.cpu-cycles.shmem_getpage_gfp.shmem_file_read_iter.__vfs_read.vfs_read.sys_read
     30.50 ±150%   +1140.2%     378.25 ± 49%  sched_debug.cfs_rq[22]:/.blocked_load_avg
     44.75 ±103%    +788.3%     397.50 ± 46%  sched_debug.cfs_rq[22]:/.tg_load_contrib
     89.50 ±159%    +300.3%     358.25 ± 75%  sched_debug.cfs_rq[2]:/.blocked_load_avg
    115.75 ±123%    +231.7%     384.00 ± 70%  sched_debug.cfs_rq[2]:/.tg_load_contrib
      0.50 ±100%    +750.0%       4.25 ± 67%  sched_debug.cfs_rq[32]:/.nr_spread_over
    499.50 ± 44%     -98.2%       9.00 ±101%  sched_debug.cfs_rq[40]:/.blocked_load_avg
    505.50 ± 44%     -95.2%      24.50 ± 73%  sched_debug.cfs_rq[40]:/.tg_load_contrib
    421.00 ± 56%     -85.7%      60.25 ±109%  sched_debug.cfs_rq[42]:/.blocked_load_avg
    428.75 ± 56%     -80.4%      84.00 ± 86%  sched_debug.cfs_rq[42]:/.tg_load_contrib
      8053 ±  2%     +13.4%       9132 ±  5%  sched_debug.cfs_rq[47]:/.avg->runnable_avg_sum
    175.25 ±  2%     +12.7%     197.50 ±  5%  sched_debug.cfs_rq[47]:/.tg_runnable_contrib
      0.25 ±173%   +1500.0%       4.00 ± 77%  sched_debug.cfs_rq[8]:/.nr_spread_over
     90.75 ± 13%     -23.1%      69.75 ± 15%  sched_debug.cpu#0.cpu_load[2]
     97.00 ± 15%     -28.4%      69.50 ± 16%  sched_debug.cpu#0.cpu_load[3]
     99.50 ± 14%     -27.6%      72.00 ± 18%  sched_debug.cpu#0.cpu_load[4]
    -10.25 ±-14%     -73.2%      -2.75 ±-180%  sched_debug.cpu#1.nr_uninterruptible
      8173 ±106%     -78.9%       1722 ± 35%  sched_debug.cpu#10.nr_switches
      3896 ±112%     -81.3%     727.50 ± 36%  sched_debug.cpu#10.sched_goidle
    515.00 ± 40%     -47.2%     271.75 ± 49%  sched_debug.cpu#10.ttwu_local
      2.00 ± 81%    -325.0%      -4.50 ±-77%  sched_debug.cpu#11.nr_uninterruptible
      3818 ± 39%     -58.2%       1598 ± 68%  sched_debug.cpu#15.ttwu_local
      0.50 ±331%    -650.0%      -2.75 ±-74%  sched_debug.cpu#16.nr_uninterruptible
     12671 ± 30%     -58.4%       5270 ± 46%  sched_debug.cpu#20.ttwu_count
      2285 ± 70%     -57.0%     983.50 ± 25%  sched_debug.cpu#20.ttwu_local
      2722 ± 79%     -72.9%     738.75 ± 51%  sched_debug.cpu#21.ttwu_local
     -2.50 ±-72%    -200.0%       2.50 ± 82%  sched_debug.cpu#23.nr_uninterruptible
      1183 ± 31%    +188.4%       3413 ± 22%  sched_debug.cpu#24.nr_switches
      1384 ± 45%    +148.4%       3438 ± 22%  sched_debug.cpu#24.sched_count
    318.50 ± 54%    +347.5%       1425 ± 21%  sched_debug.cpu#24.ttwu_local
      5255 ± 46%     -60.2%       2090 ± 54%  sched_debug.cpu#25.nr_switches
      5276 ± 46%     -59.9%       2114 ± 54%  sched_debug.cpu#25.sched_count
      1893 ± 42%     -66.9%     627.00 ± 75%  sched_debug.cpu#25.ttwu_local
      1.25 ±142%    +240.0%       4.25 ± 45%  sched_debug.cpu#27.nr_uninterruptible
      0.75 ±272%    -322.2%      -1.67 ±-28%  sched_debug.cpu#31.nr_uninterruptible
      1977 ±140%     -86.5%     267.25 ± 10%  sched_debug.cpu#32.sched_goidle
      7.67 ± 78%    -122.8%      -1.75 ±-84%  sched_debug.cpu#34.nr_uninterruptible
      3642 ± 37%    +205.0%      11108 ± 53%  sched_debug.cpu#39.nr_switches
      1250 ± 51%    +292.0%       4902 ± 52%  sched_debug.cpu#39.sched_goidle
      3.00 ±  0%    +216.7%       9.50 ± 30%  sched_debug.cpu#45.cpu_load[0]
      3.50 ± 24%    +121.4%       7.75 ± 10%  sched_debug.cpu#45.cpu_load[1]
      3.25 ± 13%    +123.1%       7.25 ± 11%  sched_debug.cpu#45.cpu_load[2]
      3.25 ± 13%     +92.3%       6.25 ± 23%  sched_debug.cpu#45.cpu_load[3]
      3.00 ±  0%     +91.7%       5.75 ± 22%  sched_debug.cpu#45.cpu_load[4]
      1593 ± 19%     +63.6%       2605 ± 30%  sched_debug.cpu#47.curr->pid
    365.75 ± 39%    +254.6%       1297 ± 98%  sched_debug.cpu#6.ttwu_local
      8717 ± 80%     -78.7%       1856 ± 45%  sched_debug.cpu#8.nr_switches
      3992 ± 85%     -80.5%     778.50 ± 51%  sched_debug.cpu#8.sched_goidle
      6221 ±128%     -83.9%     998.75 ± 44%  sched_debug.cpu#8.ttwu_count
    722.00 ± 71%     -69.5%     220.25 ±  5%  sched_debug.cpu#8.ttwu_local
      0.25 ±173%    +321.4%       1.05 ±  5%  sched_debug.rt_rq[12]:/.rt_time
      0.04 ±173%    +311.0%       0.17 ±  8%  sched_debug.rt_rq[13]:/.rt_time


lkp-sbx04: Sandy Bridge-EX
Memory: 64G

ivb42: Ivytown Ivy Bridge-EP
Memory: 64G




                           will-it-scale.time.user_time

  325 ++--------------------------------------------------------------------+
  320 ++      O       O          O                                          |
      |   O              O   O       O   O   O   O   O                      |
  315 O+          O                                             O           |
  310 ++                                                 O  O           O   O
      |                                                             O       |
  305 ++                                                                    |
  300 ++                                                                    |
  295 ++                                                                    |
      |                                      *..                            |
  290 ++         .*..          ..*...*..   ..   .                           |
  285 *+..*... ..    .  .*...*.         . .      *...*...*..*...*...  ..*.. |
      |       *       *.                 *                          *.     .|
  280 ++                                                                    *
  275 ++--------------------------------------------------------------------+


        [*] bisect-good sample
        [O] bisect-bad  sample

To reproduce:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run     job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: will-it-scale
default-monitors:
  wait: activate-monitor
  kmsg: 
  uptime: 
  iostat: 
  vmstat: 
  numa-numastat: 
  numa-vmstat: 
  numa-meminfo: 
  proc-vmstat: 
  proc-stat:
    interval: 10
  meminfo: 
  slabinfo: 
  interrupts: 
  lock_stat: 
  latency_stats: 
  softirqs: 
  bdi_dev_mapping: 
  diskstats: 
  nfsstat: 
  cpuidle: 
  cpufreq-stats: 
  turbostat: 
  pmeter: 
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer: 
  watchdog: 
commit: dd2384a75d1c046faf068a6352732a204814b86d
model: Sandy Bridge-EX
nr_cpu: 64
memory: 64G
nr_ssd_partitions: 4
ssd_partitions: "/dev/disk/by-id/ata-INTEL_SSDSC2CW240A3_CVCV20430*-part1"
swap_partitions: 
category: benchmark
perf-profile:
  freq: 800
will-it-scale:
  test: read1
queue: cyclic
testbox: lkp-sbx04
tbox_group: lkp-sbx04
kconfig: x86_64-rhel
enqueue_time: 2015-08-08 06:51:04.467682345 +08:00
id: 7543484f1eea88c654299222e83a89fb3f8fbd44
user: lkp
compiler: gcc-4.9
head_commit: dd2384a75d1c046faf068a6352732a204814b86d
base_commit: 733db573a6451681b60e7372d2862de09d6eb04e
branch: linus/master
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/dd2384a75d1c046faf068a6352732a204814b86d/vmlinuz-4.2.0-rc5-00156-gdd2384a"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/will-it-scale/performance-read1/lkp-sbx04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/dd2384a75d1c046faf068a6352732a204814b86d/0"
job_file: "/lkp/scheduled/lkp-sbx04/cyclic_will-it-scale-performance-read1-x86_64-rhel-CYCLIC_HEAD-dd2384a75d1c046faf068a6352732a204814b86d-20150808-9771-1mploli-0.yaml"
dequeue_time: 2015-08-08 18:18:22.936002643 +08:00
max_uptime: 1500
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-sbx04/cyclic_will-it-scale-performance-read1-x86_64-rhel-CYCLIC_HEAD-dd2384a75d1c046faf068a6352732a204814b86d-20150808-9771-1mploli-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linus/master
- commit=dd2384a75d1c046faf068a6352732a204814b86d
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/dd2384a75d1c046faf068a6352732a204814b86d/vmlinuz-4.2.0-rc5-00156-gdd2384a
- max_uptime=1500
- RESULT_ROOT=/result/will-it-scale/performance-read1/lkp-sbx04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/dd2384a75d1c046faf068a6352732a204814b86d/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/dd2384a75d1c046faf068a6352732a204814b86d/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/will-it-scale.cgz"
job_state: finished
loadavg: 47.44 21.45 8.37 1/628 11393
start_time: '1439029153'
end_time: '1439029462'
version: "/lkp/lkp/.src-20150807-183152"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu16/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu17/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu18/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu19/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu20/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu21/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu22/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu23/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu24/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu25/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu26/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu27/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu28/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu29/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu30/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu31/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu32/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu33/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu34/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu35/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu36/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu37/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu38/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu39/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu40/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu41/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu42/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu43/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu44/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu45/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu46/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu47/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu48/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu49/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu50/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu51/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu52/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu53/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu54/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu55/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu56/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu57/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu58/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu59/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu60/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu61/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu62/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu63/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
./runtest.py read1 16 both 1 8 16 24 32 48 64
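
Note: the 64 per-CPU scaling_governor writes above are the commands the harness emitted verbatim. When reproducing by hand on a box with the usual cpufreq sysfs layout, they are equivalent to this short loop (a sketch, assuming every online CPU exposes a cpufreq directory):

        # set the performance governor on all CPUs at once
        for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
                echo performance > "$g"
        done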
