Use funcatexit to handle the debugfs cleanup in transparent hugepage test
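
Instead of wrapping the whole test body in try/finally, define a
module-level cleanup() helper and register it with funcatexit, so the
debugfs mount point is unmounted and the guest session closed when the
test exits.  This also lets the test body drop one level of indentation.
In outline, the pattern this patch follows (cleanup() is defined by the
test itself; only funcatexit.register() comes from the framework):

    from virttest import funcatexit

    def cleanup(debugfs_path, session):
        # Unmount debugfs, remove the mount directory and close the
        # guest session when the test exits.
        ...

    # inside run_trans_hugepage():
    funcatexit.register(env, params.get("type"), cleanup, debugfs_path,
                        session)
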
Signed-off-by: Yiqiao Pu <[email protected]>
---
tests/trans_hugepage.py | 115 ++++++++++++++++++++++++----------------------
1 files changed, 60 insertions(+), 55 deletions(-)
diff --git a/tests/trans_hugepage.py b/tests/trans_hugepage.py
index 61689de..274de4d 100644
--- a/tests/trans_hugepage.py
+++ b/tests/trans_hugepage.py
@@ -1,7 +1,18 @@
import logging, os, re
from autotest.client.shared import error
from autotest.client.shared import utils
-from virttest import utils_test
+from virttest import utils_test, funcatexit
+
+
+def cleanup(debugfs_path, session):
+ """
+    Unmount the debugfs and close the session
+ """
+ if os.path.ismount(debugfs_path):
+ utils.run("umount %s" % debugfs_path)
+ if os.path.isdir(debugfs_path):
+ os.removedirs(debugfs_path)
+ session.close()
 
 
 @error.context_aware
@@ -45,76 +56,70 @@ def run_trans_hugepage(test, params, env):
vm = utils_test.get_living_vm(env, params.get("main_vm"))
session = utils_test.wait_for_login(vm, timeout=login_timeout)
- try:
- logging.info("Smoke test start")
- error.context("smoke test")
-
- nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
- if nr_ah_before <= 0:
- e_msg = 'smoke: Host is not using THP'
- logging.error(e_msg)
- failures.append(e_msg)
-
- # Protect system from oom killer
- if int(get_mem_status('MemFree', 'guest')) / 1024 < mem :
- mem = int(get_mem_status('MemFree', 'guest')) / 1024
+ funcatexit.register(env, params.get("type"), cleanup, debugfs_path,
+ session)
- session.cmd("mkdir -p %s" % mem_path)
+ logging.info("Smoke test start")
+ error.context("smoke test")
-        session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path))
+ nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
+ if nr_ah_before <= 0:
+ e_msg = 'smoke: Host is not using THP'
+ logging.error(e_msg)
+ failures.append(e_msg)
- count = mem / 4
- session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
- (mem_path, count), timeout=dd_timeout)
+ # Protect system from oom killer
+ if int(get_mem_status('MemFree', 'guest')) / 1024 < mem :
+ mem = int(get_mem_status('MemFree', 'guest')) / 1024
- nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))
+ session.cmd("mkdir -p %s" % mem_path)
- if nr_ah_after <= nr_ah_before:
- e_msg = ('smoke: Host did not use new THP during dd')
- logging.error(e_msg)
- failures.append(e_msg)
+ session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path))
- if debugfs_flag == 1:
- if int(open('%s/kvm/largepages' % debugfs_path, 'r').read()) <= 0:
- e_msg = 'smoke: KVM is not using THP'
- logging.error(e_msg)
- failures.append(e_msg)
+ count = mem / 4
+ session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
+ (mem_path, count), timeout=dd_timeout)
- logging.info("Smoke test finished")
+ nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))
- # Use parallel dd as stress for memory
- count = count / 3
- logging.info("Stress test start")
- error.context("stress test")
- cmd = "rm -rf %s/*; for i in `seq %s`; do dd " % (mem_path, count)
- cmd += "if=/dev/zero of=%s/$i bs=4000000 count=1& done;wait" % mem_path
- output = session.cmd_output(cmd, timeout=dd_timeout)
+ if nr_ah_after <= nr_ah_before:
+ e_msg = ('smoke: Host did not use new THP during dd')
+ logging.error(e_msg)
+ failures.append(e_msg)
- if len(re.findall("No space", output)) > count * 0.05:
- e_msg = "stress: Too many dd instances failed in guest"
+ if debugfs_flag == 1:
+ if int(open('%s/kvm/largepages' % debugfs_path, 'r').read()) <= 0:
+ e_msg = 'smoke: KVM is not using THP'
logging.error(e_msg)
failures.append(e_msg)
- try:
- output = session.cmd('pidof dd')
- except Exception:
- output = None
+ logging.info("Smoke test finished")
- if output is not None:
- for i in re.split('\n+', output):
- session.cmd('kill -9 %s' % i)
+ # Use parallel dd as stress for memory
+ count = count / 3
+ logging.info("Stress test start")
+ error.context("stress test")
+ cmd = "rm -rf %s/*; for i in `seq %s`; do dd " % (mem_path, count)
+ cmd += "if=/dev/zero of=%s/$i bs=4000000 count=1& done;wait" % mem_path
+ output = session.cmd_output(cmd, timeout=dd_timeout)
+
+ if len(re.findall("No space", output)) > count * 0.05:
+ e_msg = "stress: Too many dd instances failed in guest"
+ logging.error(e_msg)
+ failures.append(e_msg)
+
+ try:
+ output = session.cmd('pidof dd')
+ except Exception:
+ output = None
- session.cmd("umount %s" % mem_path)
+ if output is not None:
+ for i in re.split('\n+', output):
+ session.cmd('kill -9 %s' % i)
- logging.info("Stress test finished")
+ session.cmd("umount %s" % mem_path)
- finally:
- error.context("all tests cleanup")
- if os.path.ismount(debugfs_path):
- utils.run("umount %s" % debugfs_path)
- if os.path.isdir(debugfs_path):
- os.removedirs(debugfs_path)
- session.close()
+ logging.info("Stress test finished")
error.context("")
if failures:
--
1.7.7.6