On Fri 21 Dec 2012 05:55:14 PM CST, Amos Kong wrote:
On Fri, Dec 21, 2012 at 11:36:31AM +0800, wenli wrote:
On Fri 21 Dec 2012 11:16:15 AM CST, Amos Kong wrote:
On Fri, Dec 21, 2012 at 11:07:43AM +0800, wenli wrote:
On Thu 20 Dec 2012 11:23:39 AM CST, Amos Kong wrote:
Enable demo mode of netperf, process mixed demo results to drop
unstable factors, only compute the effective demo results when
all the threads work. This would make the performance results more
stable.

We start many clients and watch the demo output, if all the threads
really work, then start to record the interrupt/packet number, and
count the cpu usage. After a fixed test time, record the final
interrupt/packet number.

Signed-off-by: Amos Kong<[email protected]>
---
  shared/cfg/subtests.cfg.sample |    2 +-
  tests/netperf.py               |   95 ++++++++++++++++++++++++++++-----------
  2 files changed, 69 insertions(+), 28 deletions(-)

diff --git a/shared/cfg/subtests.cfg.sample b/shared/cfg/subtests.cfg.sample
index df30cdb..9b5bedd 100644
--- a/shared/cfg/subtests.cfg.sample
+++ b/shared/cfg/subtests.cfg.sample
@@ -2480,7 +2480,7 @@ variants:
          # bridge_nic2 = switch
          nic_model_nic2 = e1000
          netperf_files = netperf-2.6.0.tar.bz2
-        setup_cmd = "cd /tmp&&    rm -rf netperf-2.6.0&&    tar xvfj netperf-2.6.0.tar.bz2&&    cd 
netperf-2.6.0&&    ./configure --enable-burst&&    make"
+        setup_cmd = "cd /tmp&&    rm -rf netperf-2.6.0&&    tar xvfj netperf-2.6.0.tar.bz2&&    cd 
netperf-2.6.0&&    ./configure --enable-burst --enable-demo=yes&&    make"
          # configure netperf test parameters
          l = 60
          protocols = "TCP_STREAM TCP_MAERTS TCP_RR"
diff --git a/tests/netperf.py b/tests/netperf.py
index cde8a39..68090fc 100644
--- a/tests/netperf.py
+++ b/tests/netperf.py
@@ -1,4 +1,4 @@
-import logging, os, commands, threading, re, glob, time
+import logging, os, commands, threading, re, glob, time, shutil
  from autotest.client import utils
  from autotest.client.shared import error
  from virttest import utils_test, utils_misc, remote
@@ -247,18 +247,6 @@ def start_test(server, server_ctl, host, clients, 
resultsdir, l=60,
      @param server_cyg: shell session for cygwin in windows guest
      """

-    def parse_file(file_prefix, raw=""):
-        """ Parse result files and reture throughput total """
-        thu = 0
-        for filename in glob.glob("%s.*.nf" % file_prefix):
-            o = commands.getoutput("cat %s |tail -n 1" % filename)
-            try:
-                thu += float(o.split()[raw])
-            except Exception:
-                logging.debug(commands.getoutput("cat %s.*" % file_prefix))
-                return -1
-        return thu
-
      guest_ver_cmd = params.get("guest_ver_cmd", "uname -r")
      fd = open("%s/netperf-result.%s.RHS" % (resultsdir, time.time()), "w")

@@ -307,19 +295,18 @@ def start_test(server, server_ctl, host, clients, 
resultsdir, l=60,
              for j in sessions_test:
                  if (protocol == "TCP_RR"):
                      ret = launch_client(j, server, server_ctl, host, clients, 
l,
-                    "-t %s -v 0 -P -0 -- -r %s,%s" % (protocol, i, i),
+                    "-t %s -v 1 -- -r %s,%s" % (protocol, i, i),
                      netserver_port, params, server_cyg)
-                    thu = parse_file("/tmp/netperf.%s" % ret['pid'], 0)
                  elif (protocol == "TCP_MAERTS"):
                      ret = launch_client(j, server, server_ctl, host, clients, 
l,
                                       "-C -c -t %s -- -m ,%s" % (protocol, i),
                                       netserver_port, params, server_cyg)
-                    thu = parse_file("/tmp/netperf.%s" % ret['pid'], 4)
                  else:
                      ret = launch_client(j, server, server_ctl, host, clients, 
l,
                                       "-C -c -t %s -- -m %s" % (protocol, i),
                                       netserver_port, params, server_cyg)
-                    thu = parse_file("/tmp/netperf.%s" % ret['pid'], 4)
+
+                thu = float(ret['thu'])
                  cpu = 100 - float(ret['mpstat'].split()[mpstat_index])
                  normal = thu / cpu
                  if ret.get('rx_pkts') and ret.get('irq_inj'):
@@ -353,7 +340,7 @@ def start_test(server, server_ctl, host, clients, 
resultsdir, l=60,

                  fd.flush()
                  logging.debug("Remove temporary files")
-                commands.getoutput("rm -f /tmp/netperf.%s.*.nf" % ret['pid'])
+                commands.getoutput("rm -f /tmp/netperf.%s.nf" % ret['pid'])
      fd.close()


@@ -472,19 +459,51 @@ def launch_client(sessions, server, server_ctl, host, 
clients, l, nf_args,

      def netperf_thread(i, numa_enable, client_s):
          cmd = ""
+        fname = "/tmp/netperf.%s.nf" % pid
          if numa_enable:
              output = ssh_cmd(client_s, "numactl --hardware")
              n = int(re.findall("available: (\d+) nodes", output)[0]) - 1
              cmd += "numactl --cpunodebind=%s --membind=%s " % (n, n)
-        cmd += "%s -H %s -l %s %s" % (client_path, server, l, nf_args)
+        cmd += "%s -D 1 -H %s -l %s %s" % (client_path, server, int(l)*1.5, 
nf_args)
+        cmd += ">>    %s" % fname
+
+        ssh_cmd(client_s, cmd)
+        output = ssh_cmd(client_s, "cat %s" % fname)
+        if not os.path.exists(fname):
+            f = file(fname, "w")
+            f.write(output)
+            f.close()
+
+
+    def parse_demo_result(fname, sessions):
+        """
+        Process the demo result, remove the noise from head,
+        and compute the final throughput.
+
+        @param fname: result file name
+        @param sessions: sessions' number
+        """
+        fd = open(fname)
+        lines = fd.readlines()
+        fd.close()
+
+        for i in range(1, len(lines)+1):
+            if "AF_INET" in lines[-i]:
+                break
+        nresult = i - 1
+        if nresult<    int(sessions):
+            raise error.TestError("We couldn't expect this parallism,"
+                                  "expect %s get %s" % (sessions, nresult))
+
+        niteration = nresult / sessions
+        result = 0.0
+        for this in lines[-sessions * niteration:]:

                 logging.debug(this)

^^^ wenli, can you help to add this debug line, and provide the output
to me?

It seems that the last line is not satisfied, debug info:

22:31:14 INFO | ['Interim result:  902.01 10^6bits/s over 1.020
seconds ending at 1356060499.288\n', 'Interim result:  911.55
10^6bits/s over 1.000 seconds ending at 1356060506.301\n', 'Interim
result:  913.46 10^6bits/s over 1.000 seconds ending at
1356060507.301\n', 'Interim result:  915.07 10^6bits/s over 1.000
22:31:14 DEBUG| Interim result:  911.80 10^6bits/s over 0.941

Hello Wenli,

.....

seconds ending at 1356060528.268

^^^ problem exists here, it only happens when we take 0.5 $test_time to
wait for all the threads to start working. I never touched this issue.

After investigation, I found that this problem only exists in the netperf.exhost_guest test case. I never hit this error in the netperf.host_guest test case. You can have a try.


22:31:14 DEBUG| Recv   Send    Send
Utilization       Service Demand

22:31:14 ERROR| Test failed: IndexError: list index out of range


If you could not apply the patches correctly, please test with the
upstream code directly.


+            result += float(re.findall("Interim result: *(\S+)", this)[0])

It seems that the result is not what you expect; error:

Traceback (most recent call last):
  File "/usr/local/autotest-devel/client/shared/test.py", line 852,
in _call_test_function
    return func(*args, **dargs)
  File "/usr/local/autotest-devel/client/shared/test.py", line 304,
in execute
    postprocess_profiled_run, args, dargs)
  File "/usr/local/autotest-devel/client/shared/test.py", line 221,
in _call_run_once
    *args, **dargs)
  File "/usr/local/autotest-devel/client/shared/test.py", line 327,
in run_once_profiling
    self.run_once(*args, **dargs)
  File "/usr/local/autotest-devel/client/virt/virt_test.py", line
101, in run_once
    run_func(self, params, env)
  File "/usr/local/autotest-devel/client/shared/error.py", line 138,
in new_fn
    return fn(*args, **kwargs)
  File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
215, in run_netperf
    params=params, server_cyg=server_cyg, test=test)
  File "/usr/local/autotest-devel/client/shared/error.py", line 138,
in new_fn
    return fn(*args, **kwargs)
  File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
314, in start_test
    netserver_port, params, server_cyg)
  File "/usr/local/autotest-devel/client/shared/error.py", line 138,
in new_fn
    return fn(*args, **kwargs)
  File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
557, in launch_client
    ret['thu'] = parse_demo_result(fname, int(sessions))
  File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
500, in parse_demo_result
    print float(re.findall("Interim result: *(\S+)", this)[0])
IndexError: list index out of range




_______________________________________________
Autotest-kernel mailing list
[email protected]
https://www.redhat.com/mailman/listinfo/autotest-kernel

Reply via email to