FYI, we noticed the changes below on

git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 8a0516ed8b90c95ffa1363b420caa37418149f21 ("mm: convert p[te|md]_numa users to p[te|md]_protnone_numa")


testbox/testcase/testparams: lkp-sbx04/netperf/performance-900s-200%-TCP_MAERTS
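
For readers unfamiliar with what the commit name refers to: it converts users of the dedicated _PAGE_NUMA software bit (pte_numa()/pmd_numa()) over to the PROT_NONE page-table encoding (pte_protnone()/pmd_protnone()) for NUMA hinting faults. The small userspace program below is only a toy model of that encoding change, not kernel code; all TOY_* flag names and bit positions are invented for illustration and do not match the real x86 PTE layout.

/*
 * Toy userspace model (NOT kernel code) of the encoding change named in the
 * commit: NUMA hinting ptes used to carry a dedicated _PAGE_NUMA software bit
 * tested with pte_numa(); after the conversion they are encoded as PROT_NONE
 * mappings and tested with pte_protnone().  TOY_* names/bits are made up.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long toy_pte_t;

#define TOY_PTE_PRESENT   (1UL << 0)  /* page mapped and accessible */
#define TOY_PTE_PROTNONE  (1UL << 1)  /* mapped, but all access rights removed */
#define TOY_PTE_NUMA      (1UL << 2)  /* old dedicated NUMA-hinting software bit */

/* Old scheme: a separate software bit marks a NUMA hinting pte. */
static bool toy_pte_numa(toy_pte_t pte)
{
        return pte & TOY_PTE_NUMA;
}

/* New scheme: a NUMA hinting pte is just a PROT_NONE (not-present) mapping. */
static bool toy_pte_protnone(toy_pte_t pte)
{
        return (pte & TOY_PTE_PROTNONE) && !(pte & TOY_PTE_PRESENT);
}

int main(void)
{
        toy_pte_t old_style_hint = TOY_PTE_PRESENT | TOY_PTE_NUMA;
        toy_pte_t new_style_hint = TOY_PTE_PROTNONE;

        /* The fault path asks the same question either way -- "is this a NUMA
         * hinting fault?" -- only the pte encoding it inspects has changed. */
        printf("old encoding: toy_pte_numa()     = %d\n", toy_pte_numa(old_style_hint));
        printf("new encoding: toy_pte_protnone() = %d\n", toy_pte_protnone(new_style_hint));
        return 0;
}

Compile and run with "gcc toy_pte.c -o toy_pte && ./toy_pte" if you want to poke at it; again, this is only a sketch of the idea, not what mm/memory.c actually does.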

e7bb4b6d1609cce3  8a0516ed8b90c95ffa1363b420  
----------------  --------------------------  
         %stddev     %change         %stddev
             \          |                \  
    226261 ±  1%   +2189.6%    5180560 ±  0%  netperf.time.minor_page_faults
       721 ±  0%      -1.7%        709 ±  0%  netperf.Throughput_Mbps
     12341 ± 16%    -100.0%          0 ±  0%  proc-vmstat.numa_pages_migrated
    364595 ±  3%    -100.0%          0 ±  0%  proc-vmstat.numa_hint_faults_local
    388922 ±  4%    -100.0%          0 ±  0%  proc-vmstat.numa_hint_faults
    226261 ±  1%   +2189.6%    5180560 ±  0%  time.minor_page_faults
    388831 ±  3%   +3987.5%   15893407 ±  0%  proc-vmstat.numa_pte_updates
     12341 ± 16%    -100.0%          0 ±  0%  proc-vmstat.pgmigrate_success
        47 ± 42%     -60.3%         18 ± 13%  sched_debug.cfs_rq[5]:/.blocked_load_avg
        73 ± 19%     -53.9%         34 ± 18%  sched_debug.cfs_rq[46]:/.load
        32 ± 20%     +75.0%         56 ± 32%  sched_debug.cpu#32.load
        27 ± 37%     +61.1%         43 ± 27%  sched_debug.cfs_rq[15]:/.blocked_load_avg
        54 ± 20%     -43.8%         30 ±  5%  sched_debug.cfs_rq[17]:/.load
        57 ± 30%     -39.8%         34 ± 17%  sched_debug.cfs_rq[53]:/.load
        70 ± 29%     -41.3%         41 ±  8%  sched_debug.cfs_rq[5]:/.tg_load_contrib
        64 ± 20%     -27.9%         46 ± 14%  sched_debug.cpu#26.load
        34 ± 21%     +68.6%         57 ±  1%  sched_debug.cfs_rq[15]:/.load
        60 ± 21%     -28.2%         43 ± 26%  sched_debug.cfs_rq[6]:/.load
        50 ± 18%     +33.2%         67 ± 18%  sched_debug.cfs_rq[15]:/.tg_load_contrib
        62 ± 28%     -40.6%         37 ± 32%  sched_debug.cfs_rq[30]:/.load
        59 ± 18%     -33.5%         39 ± 14%  sched_debug.cfs_rq[62]:/.load
       556 ± 25%     -54.2%        255 ± 36%  sched_debug.cpu#59.sched_goidle
      1.63 ±  2%     -31.2%       1.12 ±  0%  perf-profile.cpu-cycles._raw_spin_lock.free_one_page.__free_pages_ok.free_compound_page.put_compound_page
        50 ± 40%     -35.5%         32 ± 16%  sched_debug.cpu#43.load
        31 ± 18%     +39.7%         44 ± 22%  sched_debug.cpu#53.load
      2.18 ±  3%     -29.0%       1.55 ±  3%  perf-profile.cpu-cycles.free_one_page.__free_pages_ok.free_compound_page.put_compound_page.put_page
        46 ± 13%     -37.6%         29 ± 31%  sched_debug.cfs_rq[16]:/.blocked_load_avg
        51 ± 26%     -36.6%         32 ±  6%  sched_debug.cpu#7.load
        73 ± 13%     -20.8%         58 ±  9%  sched_debug.cfs_rq[51]:/.tg_load_contrib
      1.77 ±  2%     -25.1%       1.33 ±  1%  perf-profile.cpu-cycles._raw_spin_lock_irqsave.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.skb_page_frag_refill
        58 ± 23%     -38.4%         35 ± 24%  sched_debug.cfs_rq[2]:/.load
   8833788 ±  8%     +22.5%   10821104 ± 12%  sched_debug.cfs_rq[12]:/.max_vruntime
   8833787 ±  8%     +22.5%   10821104 ± 12%  sched_debug.cfs_rq[12]:/.MIN_vruntime
      1951 ± 12%     +20.1%       2343 ± 12%  sched_debug.cpu#9.curr->pid
    112948 ±  2%     +25.6%     141909 ± 11%  sched_debug.cpu#32.sched_count
      1955 ±  9%     +17.3%       2293 ±  9%  sched_debug.cpu#46.curr->pid
   9533920 ± 16%     +31.8%   12561711 ± 13%  sched_debug.cfs_rq[53]:/.max_vruntime
   9533919 ± 16%     +31.8%   12561711 ± 13%  sched_debug.cfs_rq[53]:/.MIN_vruntime
      0.97 ± 10%     -15.7%       0.82 ±  6%  perf-profile.cpu-cycles.tcp_send_mss.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
     59313 ± 24%     -21.3%      46703 ±  2%  sched_debug.cpu#25.ttwu_count
      3.92 ±  2%     -17.1%       3.25 ±  0%  perf-profile.cpu-cycles.put_compound_page.put_page.skb_release_data.skb_release_all.__kfree_skb
      3.72 ±  2%     -16.4%       3.11 ±  0%  perf-profile.cpu-cycles.free_compound_page.put_compound_page.put_page.skb_release_data.skb_release_all
      3.65 ±  1%     -16.8%       3.04 ±  0%  perf-profile.cpu-cycles.__free_pages_ok.free_compound_page.put_compound_page.put_page.skb_release_data
      1853 ±  9%     +15.7%       2144 ±  5%  sched_debug.cpu#45.curr->pid
      1769 ±  4%     +19.9%       2121 ±  6%  sched_debug.cpu#61.curr->pid
      5.97 ±  2%     -16.1%       5.01 ±  0%  perf-profile.cpu-cycles.tcp_rcv_established.tcp_v4_do_rcv.tcp_v4_rcv.ip_local_deliver_finish.ip_local_deliver
      1.59 ±  2%     -14.2%       1.37 ±  2%  perf-profile.cpu-cycles.sk_stream_alloc_skb.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
      2.65 ±  3%     -17.1%       2.20 ±  1%  perf-profile.cpu-cycles.tcp_transmit_skb.tcp_write_xmit.__tcp_push_pending_frames.tcp_rcv_established.tcp_v4_do_rcv
      6.19 ±  1%     -15.3%       5.25 ±  0%  perf-profile.cpu-cycles.tcp_v4_do_rcv.tcp_v4_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish
      4.45 ±  1%     -15.6%       3.75 ±  0%  perf-profile.cpu-cycles.put_page.skb_release_data.skb_release_all.__kfree_skb.tcp_recvmsg
    693150 ±  3%     +14.6%     794663 ±  6%  sched_debug.cpu#1.avg_idle
      8.14 ±  1%     -14.7%       6.94 ±  0%  perf-profile.cpu-cycles.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action
      7.37 ±  2%     -14.8%       6.28 ±  1%  perf-profile.cpu-cycles.tcp_v4_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv
      6.70 ±  1%     -14.3%       5.74 ±  1%  perf-profile.cpu-cycles.__kfree_skb.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom
      1988 ±  7%      -8.0%       1829 ±  7%  sched_debug.cpu#33.curr->pid
      5.21 ±  1%     -14.0%       4.49 ±  0%  perf-profile.cpu-cycles.skb_release_data.skb_release_all.__kfree_skb.tcp_recvmsg.inet_recvmsg
      7.93 ±  2%     -14.5%       6.78 ±  0%  perf-profile.cpu-cycles.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog
      7.73 ±  2%     -14.3%       6.62 ±  0%  perf-profile.cpu-cycles.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb
      7.70 ±  2%     -14.6%       6.58 ±  0%  perf-profile.cpu-cycles.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core
      8.90 ±  1%     -14.1%       7.65 ±  0%  perf-profile.cpu-cycles.__do_softirq.do_softirq_own_stack.do_softirq.__local_bh_enable_ip.ip_finish_output
     39826 ±  8%     -11.0%      35446 ±  1%  sched_debug.cpu#22.ttwu_local
      6.29 ±  1%     -14.3%       5.38 ±  0%  perf-profile.cpu-cycles.skb_release_all.__kfree_skb.tcp_recvmsg.inet_recvmsg.sock_recvmsg
      8.68 ±  2%     -14.1%       7.45 ±  0%  perf-profile.cpu-cycles.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq.__local_bh_enable_ip
      8.43 ±  2%     -13.5%       7.29 ±  0%  perf-profile.cpu-cycles.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq
      8.30 ±  2%     -13.5%       7.18 ±  0%  perf-profile.cpu-cycles.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq
      8.32 ±  2%     -13.4%       7.21 ±  0%  perf-profile.cpu-cycles.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack
      8.99 ±  2%     -13.3%       7.79 ±  1%  perf-profile.cpu-cycles.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output.ip_output.ip_local_out_sk
      9.02 ±  1%     -13.3%       7.82 ±  1%  perf-profile.cpu-cycles.__local_bh_enable_ip.ip_finish_output.ip_output.ip_local_out_sk.ip_queue_xmit
      1.02 ±  6%     -15.9%       0.86 ±  3%  perf-profile.cpu-cycles.ip_queue_xmit.tcp_transmit_skb.tcp_write_xmit.__tcp_push_pending_frames.tcp_rcv_established
  10430785 ±  7%     -16.8%    8673532 ±  9%  sched_debug.cfs_rq[49]:/.max_vruntime
  10430785 ±  7%     -16.8%    8673530 ±  9%  sched_debug.cfs_rq[49]:/.MIN_vruntime
  11509303 ±  9%     -23.1%    8846526 ± 14%  sched_debug.cfs_rq[13]:/.MIN_vruntime
  11509303 ±  9%     -23.1%    8846526 ± 14%  sched_debug.cfs_rq[13]:/.max_vruntime
      8.95 ±  1%     -13.2%       7.77 ±  0%  perf-profile.cpu-cycles.do_softirq_own_stack.do_softirq.__local_bh_enable_ip.ip_finish_output.ip_output
     40589 ±  6%     -10.4%      36365 ±  3%  sched_debug.cpu#54.ttwu_local
      4.34 ±  1%     -12.8%       3.78 ±  1%  perf-profile.cpu-cycles.alloc_pages_current.skb_page_frag_refill.sk_page_frag_refill.tcp_sendmsg.inet_sendmsg
      3.74 ±  1%     -14.7%       3.19 ±  1%  perf-profile.cpu-cycles.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.skb_page_frag_refill.sk_page_frag_refill
      1.02 ±  6%     -14.0%       0.88 ±  4%  perf-profile.cpu-cycles.skb_release_head_state.skb_release_all.__kfree_skb.tcp_recvmsg.inet_recvmsg
     15.16 ±  0%     +15.4%      17.50 ±  0%  perf-profile.cpu-cycles.copy_user_generic_string.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
     10.04 ±  1%     -12.1%       8.82 ±  0%  perf-profile.cpu-cycles.ip_output.ip_local_out_sk.ip_queue_xmit.tcp_transmit_skb.tcp_send_ack
     10.11 ±  1%     -12.1%       8.88 ±  0%  perf-profile.cpu-cycles.ip_local_out_sk.ip_queue_xmit.tcp_transmit_skb.tcp_send_ack.tcp_cleanup_rbuf
     38453 ±  6%      -9.4%      34832 ±  2%  sched_debug.cpu#12.ttwu_local
     10.22 ±  1%     -12.5%       8.94 ±  0%  perf-profile.cpu-cycles.ip_finish_output.ip_output.ip_local_out_sk.ip_queue_xmit.tcp_transmit_skb
      4.16 ±  1%     -13.3%       3.60 ±  1%  perf-profile.cpu-cycles.__alloc_pages_nodemask.alloc_pages_current.skb_page_frag_refill.sk_page_frag_refill.tcp_sendmsg
      3.71 ±  2%     -11.8%       3.27 ±  0%  perf-profile.cpu-cycles.__tcp_push_pending_frames.tcp_rcv_established.tcp_v4_do_rcv.tcp_v4_rcv.ip_local_deliver_finish
        24 ±  9%     -14.4%         20 ±  3%  sched_debug.cfs_rq[23]:/.runnable_load_avg
        23 ±  4%     -10.6%         21 ±  3%  sched_debug.cpu#23.cpu_load[0]
      5196 ±  6%     +19.6%       6213 ± 18%  numa-vmstat.node1.nr_anon_pages
     20787 ±  6%     +19.6%      24855 ± 18%  numa-meminfo.node1.AnonPages
      3.57 ±  2%     -12.9%       3.11 ±  0%  perf-profile.cpu-cycles.tcp_write_xmit.__tcp_push_pending_frames.tcp_rcv_established.tcp_v4_do_rcv.tcp_v4_rcv
     10.44 ±  1%     -12.2%       9.17 ±  0%  perf-profile.cpu-cycles.ip_queue_xmit.tcp_transmit_skb.tcp_send_ack.tcp_cleanup_rbuf.tcp_recvmsg
     52615 ±  7%     -10.5%      47095 ±  3%  sched_debug.cpu#27.ttwu_count
    747189 ±  2%      +9.4%     817106 ±  5%  sched_debug.cpu#5.avg_idle
      4.58 ±  2%     -13.0%       3.98 ±  1%  perf-profile.cpu-cycles.skb_page_frag_refill.sk_page_frag_refill.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg
      4.95 ±  1%     -12.0%       4.36 ±  1%  perf-profile.cpu-cycles.sk_page_frag_refill.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
     12.85 ±  1%     -11.8%      11.33 ±  0%  perf-profile.cpu-cycles.tcp_cleanup_rbuf.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom
      9.45 ±  1%     -11.5%       8.36 ±  0%  perf-profile.cpu-cycles.tcp_transmit_skb.tcp_send_ack.tcp_cleanup_rbuf.tcp_recvmsg.inet_recvmsg
     12.65 ±  1%     -11.8%      11.16 ±  0%  perf-profile.cpu-cycles.tcp_send_ack.tcp_cleanup_rbuf.tcp_recvmsg.inet_recvmsg.sock_recvmsg
      1967 ±  5%     -11.3%       1744 ±  4%  sched_debug.cpu#38.curr->pid
      1.50 ±  3%     -11.2%       1.33 ±  2%  perf-profile.cpu-cycles.__alloc_skb.sk_stream_alloc_skb.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg
      1796 ±  7%     +16.9%       2101 ±  3%  sched_debug.cpu#20.curr->pid
     52803 ±  8%      -9.1%      48004 ±  2%  sched_debug.cpu#22.ttwu_count
    121926 ±  3%      -7.3%     113073 ±  3%  sched_debug.cpu#27.nr_switches
    122808 ±  3%      -7.8%     113235 ±  3%  sched_debug.cpu#27.sched_count
    121572 ±  5%      -7.1%     112948 ±  2%  sched_debug.cpu#46.nr_switches
     39590 ±  3%      -6.3%      37095 ±  4%  sched_debug.cpu#43.ttwu_local
     18366 ±  0%      -5.0%      17440 ±  0%  vmstat.system.cs
     70241 ±  0%      -2.8%      68273 ±  0%  vmstat.system.in

lkp-sbx04: Sandy Bridge-EX
Memory: 64G




                                time.minor_page_faults

  5.5e+06 ++----------------------------------------------------------------+
    5e+06 ++                                                       O O OO O O
          |                                                                 |
  4.5e+06 ++                                                                |
    4e+06 ++                                                                |
  3.5e+06 ++                                                                |
    3e+06 ++                                                                |
          |                                                                 |
  2.5e+06 ++                                                                |
    2e+06 ++                                                                |
  1.5e+06 ++                                                                |
    1e+06 ++                                                                |
          |                                                                 |
   500000 O+O.O.OO.O.O.O.O.OO.O.O.O.O.OO.O.O.O.O.OO.O.O.O.O OO O O          |
        0 ++----------------------------------------------------------------+


                            netperf.time.minor_page_faults

  5.5e+06 ++----------------------------------------------------------------+
    5e+06 ++                                                       O O OO O O
          |                                                                 |
  4.5e+06 ++                                                                |
    4e+06 ++                                                                |
  3.5e+06 ++                                                                |
    3e+06 ++                                                                |
          |                                                                 |
  2.5e+06 ++                                                                |
    2e+06 ++                                                                |
  1.5e+06 ++                                                                |
    1e+06 ++                                                                |
          |                                                                 |
   500000 O+O.O.OO.O.O.O.O.OO.O.O.O.O.OO.O.O.O.O.OO.O.O.O.O OO O O          |
        0 ++----------------------------------------------------------------+


        [*] bisect-good sample
        [O] bisect-bad  sample

To reproduce:

        apt-get install ruby
        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/setup-local job.yaml # the job file attached in this email
        bin/run-local   job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang

---
testcase: netperf
default-monitors:
  wait: pre-test
  uptime: 
  iostat: 
  vmstat: 
  numa-numastat: 
  numa-vmstat: 
  numa-meminfo: 
  proc-vmstat: 
  proc-stat: 
  meminfo: 
  slabinfo: 
  interrupts: 
  lock_stat: 
  latency_stats: 
  softirqs: 
  bdi_dev_mapping: 
  diskstats: 
  nfsstat: 
  cpuidle: 
  cpufreq-stats: 
  turbostat: 
  pmeter: 
  sched_debug:
    interval: 10
default_watchdogs:
  watch-oom: 
  watchdog: 
cpufreq_governor: performance
commit: 5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833
model: Sandy Bridge-EX
nr_cpu: 64
memory: 64G
nr_ssd_partitions: 4
ssd_partitions: "/dev/disk/by-id/ata-INTEL_SSDSC2CW240A3_CVCV20430*-part1"
swap_partitions: 
runtime: 900s
nr_threads: 200%
perf-profile:
  freq: 800
netperf:
  test: TCP_MAERTS
testbox: lkp-sbx04
tbox_group: lkp-sbx04
kconfig: x86_64-rhel
enqueue_time: 2015-02-16 11:17:01.042650836 +08:00
head_commit: 5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833
base_commit: bfa76d49576599a4b9f9b7a71f23d73d6dcff735
branch: linux-devel/devel-hourly-2015021623
kernel: "/kernel/x86_64-rhel/5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833/vmlinuz-3.19.0-wl-ath-02305-g5aeb2a3"
user: lkp
queue: cyclic
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/lkp-sbx04/netperf/performance-900s-200%-TCP_MAERTS/debian-x86_64-2015-02-07.cgz/x86_64-rhel/5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833/0"
job_file: "/lkp/scheduled/lkp-sbx04/cyclic_netperf-performance-900s-200%-TCP_MAERTS-x86_64-rhel-HEAD-5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833-0-20150216-93206-1sf912c.yaml"
dequeue_time: 2015-02-17 06:06:04.095206083 +08:00
job_state: finished
loadavg: 108.28 143.27 98.93 1/578 21192
start_time: '1424124423'
end_time: '1424125325'
version: "/lkp/lkp/.src-20150216-162040"
netserver
netperf -t TCP_MAERTS -c -C -l 900
(the netperf command above is run as 128 identical concurrent instances: nr_threads is 200% on a 64-CPU box)