This commit adds a functional test suite that utilizes lxc
containers to guarantee a non-destructive test environment.
The tests can be invoked individually, as a group of related
tests, or from automake via the standard 'make check'
command.
No tests are included as part of this commit.
Example test invocations:
Run a single test (first cd to tests/ftests):
./001-cgget-basic_cgget.py
or
./ftests.py -N 15 # Run test #015
Run a suite of tests (first cd to tests/ftests):
./ftests.py -s cgget # Run all cgget tests
Run all the tests by hand:
./ftests.py
# This may be advantageous over running make check
# because it will try to re-use the same lxc
# container for all of the tests. This should
# provide a significant performance increase
Run the tests from automake:
make check
# Then examine the *.trs and *.log files for
# specifics regarding each test result
Example output from a test run:
Test Results:
Run Date: Jun 03 13:41:35
Passed: 1 test
Skipped: 0 tests
Failed: 0 tests
-----------------------------------------------------------------
Timing Results:
Test Time (sec)
---------------------------------------------------------
setup 6.95
001-cgget-basic_cgget.py 0.07
teardown 0.00
---------------------------------------------------------
Total Run Time 7.02
Signed-off-by: Tom Hromatka <[email protected]>
---
tests/ftests/README.md | 65 ++++++++++++
tests/ftests/config.py | 58 +++++++++++
tests/ftests/consts.py | 6 ++
tests/ftests/ftests.py | 273 +++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 402 insertions(+)
create mode 100644 tests/ftests/README.md
create mode 100644 tests/ftests/config.py
create mode 100755 tests/ftests/ftests.py
diff --git a/tests/ftests/README.md b/tests/ftests/README.md
new file mode 100644
index 0000000..ed3dcb6
--- /dev/null
+++ b/tests/ftests/README.md
@@ -0,0 +1,65 @@
+## Functional Test Suite for libcgroup
+
+This folder contains the functional test suite for libcgroup.
+The functional test suite utilizes lxc containers to guarantee
+a non-destructive test environment.
+
+The tests can be invoked individually, as a group of related
+tests, or from automake via the standard 'make check'
+command.
+
+## Invocation
+
+Run a single test (first cd to tests/ftests):
+
+ ./001-cgget-basic_cgget.py
+ or
+ ./ftests.py -N 15 # Run test #015
+
+Run a suite of tests (first cd to tests/ftests):
+
+ ./ftests.py -s cgget # Run all cgget tests
+
+Run all the tests by hand:
+
+ ./ftests.py
+ # This may be advantageous over running make check
+ # because it will try to re-use the same lxc
+ # container for all of the tests. This should
+ # provide a significant performance increase
+
+Run the tests from automake:
+
+ make check
+    # Then examine the *.trs and *.log files for
+    # specifics regarding each test result.
+    # ftests.py follows the automake test protocol:
+    # exit status 0 means all tests passed, 77 means
+    # tests were skipped, 99 means a hard error, and
+    # any other nonzero status is the number of
+    # failed tests
+
+## Results
+
+The test suite will generate test results upon completion of
+the test run. An example result is below:
+
+```
+Test Results:
+ Run Date: Jun 03 13:41:35
+ Passed: 1 test
+ Skipped: 0 tests
+ Failed: 0 tests
+-----------------------------------------------------------------
+Timing Results:
+ Test Time (sec)
+ ---------------------------------------------------------
+ setup 6.95
+ 001-cgget-basic_cgget.py 0.07
+ teardown 0.00
+ ---------------------------------------------------------
+ Total Run Time 7.02
+```
+
+A log file can also be generated to help in debugging failed
+tests. Run `ftests.py -h` to view the syntax.
+
+To generate a log file called foo.log at a debug level (8) run
+the following:
+
+ ./ftests.py -l 8 -L foo.log
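+
+## Writing a test
+
+No tests are included in this commit, but ftests.py defines the
+contract a test must follow.  Test files are named
+NNN-suite-description.py, e.g. 001-cgget-basic_cgget.py; the
+leading number is matched by the -N option and the suite name by
+the -s option.  The harness imports each matching file and calls
+its main(config), which must return a [result, failure_cause]
+pair where result is consts.TEST_PASSED, consts.TEST_FAILED, or
+consts.TEST_SKIPPED.  The sketch below illustrates that contract;
+the test body and the standalone entry point are hypothetical,
+since no tests ship with this commit:
+
+```
+#!/usr/bin/env python
+# 999-cgget-example.py - hypothetical test skeleton
+import consts
+
+def main(config):
+    # exercise the container, e.g. via config.container.run(...),
+    # and report the outcome plus an optional failure cause
+    result = consts.TEST_PASSED
+    cause = None
+    return [result, cause]
+
+if __name__ == '__main__':
+    # tests can also be run directly; reusing ftests.parse_args()
+    # to build a Config is one plausible bootstrap
+    import ftests
+    import sys
+    config = ftests.parse_args()
+    sys.exit(0 if main(config)[0] == consts.TEST_PASSED else 1)
+```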
diff --git a/tests/ftests/config.py b/tests/ftests/config.py
new file mode 100644
index 0000000..2d32aaf
--- /dev/null
+++ b/tests/ftests/config.py
@@ -0,0 +1,58 @@
+#
+# Config class for the libcgroup functional tests
+#
+# Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+# Author: Tom Hromatka <[email protected]>
+#
+
+#
+# This library is free software; you can redistribute it and/or modify it
+# under the terms of version 2.1 of the GNU Lesser General Public License as
+# published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+# for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library; if not, see <http://www.gnu.org/licenses>.
+#
+
+import consts
+from container import Container
+import os
+
+class Config(object):
+ def __init__(self, args, container=None):
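+        # args: the parsed command-line arguments (an argparse namespace)
+        # container: an optional pre-built Container to reuse; when None,
+        # a container is created from the defaults in consts and args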
+ self.args = args
+
+ if container:
+ self.container = container
+ else:
+ # Use the default container settings
+ self.container = Container(name=consts.DEFAULT_CONTAINER_NAME,
+ stop_timeout=args.timeout, arch=None, cfg_path=args.config,
+ distro=args.distro, release=args.release)
+
+ self.ftest_dir = os.path.dirname(os.path.abspath(__file__))
+ self.libcg_dir = os.path.dirname(self.ftest_dir)
+
+ self.test_suite = consts.TESTS_RUN_ALL_SUITES
+ self.test_num = consts.TESTS_RUN_ALL
+ self.verbose = False
+
+ def __str__(self):
+ out_str = "Configuration"
+ out_str += "\n\tcontainer = {}".format(self.container)
+
+ return out_str
+
+
+class ConfigError(Exception):
+    def __init__(self, message):
+        super(ConfigError, self).__init__(message)
+        # store the message explicitly; __str__() below relies on it
+        self.message = message
+
+ def __str__(self):
+ out_str = "ConfigError:\n\tmessage = {}".format(self.message)
+ return out_str
diff --git a/tests/ftests/consts.py b/tests/ftests/consts.py
index 522cca6..9a1b92b 100644
--- a/tests/ftests/consts.py
+++ b/tests/ftests/consts.py
@@ -39,3 +39,9 @@ DEFAULT_CONTAINER_CFG_PATH=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'default.conf')
TEMP_CONTAINER_CFG_FILE='tmp.conf'
+
+TESTS_RUN_ALL = -1
+TESTS_RUN_ALL_SUITES = "allsuites"
+TEST_PASSED = "passed"
+TEST_FAILED = "failed"
+TEST_SKIPPED = "skipped"
diff --git a/tests/ftests/ftests.py b/tests/ftests/ftests.py
new file mode 100755
index 0000000..c8ec0ab
--- /dev/null
+++ b/tests/ftests/ftests.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+#
+# Main entry point for the libcgroup functional tests
+#
+# Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+# Author: Tom Hromatka <[email protected]>
+#
+
+#
+# This library is free software; you can redistribute it and/or modify it
+# under the terms of version 2.1 of the GNU Lesser General Public License as
+# published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+# for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library; if not, see <http://www.gnu.org/licenses>.
+#
+
+import argparse
+from cgroup import Cgroup
+from config import Config
+import consts
+import container
+import datetime
+import log
+from log import Log
+import os
+from run import Run
+import sys
+import time
+
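+# wall-clock durations recorded by setup() and teardown() when invoked
+# with record_time=True; reported in run_tests()'s timing output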
+setup_time = 0.0
+teardown_time = 0.0
+
+def parse_args():
+ parser = argparse.ArgumentParser("Libcgroup Functional Tests")
+ parser.add_argument('-n', '--name',
+ help='name of the container',
+ required=False, type=str, default=None)
+ parser.add_argument('-f', '--config',
+ help='initial configuration file',
+ required=False, type=str, default=None)
+ parser.add_argument('-d', '--distro',
+ help='linux distribution to use as a template',
+ required=False, type=str, default=None)
+ parser.add_argument('-r', '--release',
+                        help='distribution release, e.g. \'trusty\'',
+ required=False, type=str, default=None)
+ parser.add_argument('-a', '--arch',
+ help='processor architecture',
+ required=False, type=str, default=None)
+ parser.add_argument('-t', '--timeout',
+                        help='wait timeout (sec) before stopping the container',
+ required=False, type=int, default=None)
+
+ parser.add_argument('-l', '--loglevel',
+ help='log level',
+ required=False, type=int, default=None)
+ parser.add_argument('-L', '--logfile',
+ help='log file',
+ required=False, type=str, default=None)
+
+ parser.add_argument('-N', '--num',
+                        help='Test number to run. If unspecified, all tests are run',
+ required=False, default=consts.TESTS_RUN_ALL, type=int)
+ parser.add_argument('-s', '--suite',
+ help='Test suite to run, e.g. cpuset', required=False,
+ default=consts.TESTS_RUN_ALL_SUITES, type=str)
+ parser.add_argument('-u', '--unpriv',
+ help='Run the tests in an unprivileged container',
+ required=False, action="store_true")
+    parser.add_argument('-v', '--verbose',
+                        help='disable the verbose timing output (enabled by default)',
+                        default=True, required=False, action="store_false")
+
+ config = Config(parser.parse_args())
+
+ if config.args.loglevel:
+ log.log_level = config.args.loglevel
+ if config.args.logfile:
+ log.log_file = config.args.logfile
+    if config.args.unpriv:
+        # unprivileged containers aren't supported yet; once they are,
+        # this should set config.container.privileged = False instead
+        raise ValueError('Unprivileged containers are not currently supported')
+
+ return config
+
+def setup(config, do_teardown=True, record_time=False):
+ global setup_time
+ start_time = time.time()
+ if do_teardown:
+ # belt and suspenders here. In case a previous run wasn't properly
+ # cleaned up, let's try and clean it up here
+ try:
+ teardown(config)
+ except Exception as e:
+ # log but ignore all exceptions
+ Log.log_debug(e)
+
+ config.container.create()
+
+ # make the /libcg directory in the container's rootfs
+ rootfs = config.container.rootfs()
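+    # rootfs() is expected to return an lxc config line of the form
+    # 'lxc.rootfs = <path>'; split on '=' to keep only the path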
+ container_rootfs_path = rootfs.split('=')[1].strip()
+ Run.run(['sudo', 'mkdir', os.path.join(container_rootfs_path,
+ consts.LIBCG_MOUNT_POINT)])
+
+ config.container.start()
+
+ # add the libcgroup library to the container's ld
+    echo_cmd = ['bash', '-c', 'echo {} >> /etc/ld.so.conf.d/libcgroup.conf'.format(
+ os.path.join('/', consts.LIBCG_MOUNT_POINT, 'src/.libs'))]
+ config.container.run(echo_cmd)
+ config.container.run('ldconfig')
+ if record_time:
+ setup_time = time.time() - start_time
+
+def run_tests(config):
+ passed_tests = []
+ failed_tests = []
+ skipped_tests = []
+
+ for root, dirs, filenames in os.walk(config.ftest_dir):
+ for filename in filenames:
+ if os.path.splitext(filename)[-1] != ".py":
+ # ignore non-python files
+ continue
+
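+            # test filenames follow NNN-suite-description.py, e.g.
+            # 001-cgget-basic_cgget.py; parse out the test number
+            # and the suite name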
+ filenum = filename.split('-')[0]
+
+ try:
+ filenum_int = int(filenum)
+ except ValueError:
+ # D'oh. This file must not be a test. Skip it
+                Log.log_debug('Skipping {}. It doesn\'t start with an int'.format(
+ filename))
+ continue
+
+ try:
+ filesuite = filename.split('-')[1]
+ except IndexError:
+ Log.log_error(
+                    'Skipping {}. It doesn\'t conform to the filename format'.format(
+ filename))
+ continue
+
+ if config.args.suite == consts.TESTS_RUN_ALL_SUITES or \
+ config.args.suite == filesuite:
+ if config.args.num == consts.TESTS_RUN_ALL or \
+ config.args.num == filenum_int:
+ test = __import__(os.path.splitext(filename)[0])
+
+ failure_cause = None
+ start_time = time.time()
+ try:
+ Log.log_debug('Running test {}.'.format(filename))
+ [ret, failure_cause] = test.main(config)
+ except Exception as e:
+ # catch all exceptions. you never know when there's
+ # a crummy test
+ failure_cause = e
+ Log.log_debug(e)
+ ret = consts.TEST_FAILED
+
+ # if the test does cause an exception, it may not have
+ # cleaned up after itself. re-create the container
+ teardown(config)
+ setup(config, do_teardown=False)
+ finally:
+ run_time = time.time() - start_time
+ if ret == consts.TEST_PASSED:
+ passed_tests.append([filename, run_time])
+ elif ret == consts.TEST_FAILED:
+                            failed_tests.append([filename, run_time, failure_cause])
+ elif ret == consts.TEST_SKIPPED:
+ skipped_tests.append([filename, run_time])
+ else:
+                            raise ValueError('Unexpected ret: {}'.format(ret))
+
+ passed_cnt = len(passed_tests)
+ failed_cnt = len(failed_tests)
+ skipped_cnt = len(skipped_tests)
+
+ print("-----------------------------------------------------------------")
+ print("Test Results:")
+ date_str = datetime.datetime.now().strftime('%b %d %H:%M:%S')
+ print('\t{}{}'.format('{0: <30}'.format("Run Date:"), '{0:
>15}'.format(date_str)))
+
+ test_str = "{} test(s)".format(passed_cnt)
+ print('\t{}{}'.format('{0: <30}'.format("Passed:"), '{0:
>15}'.format(test_str)))
+
+ test_str = "{} test(s)".format(skipped_cnt)
+ print('\t{}{}'.format('{0: <30}'.format("Skipped:"), '{0:
>15}'.format(test_str)))
+
+ test_str = "{} test(s)".format(failed_cnt)
+ print('\t{}{}'.format('{0: <30}'.format("Failed:"), '{0:
>15}'.format(test_str)))
+
+ for test in failed_tests:
+ print("\t\tTest:\t\t\t\t{} - {}".format(test[0], str(failure_cause)))
+ print("-----------------------------------------------------------------")
+
+ global setup_time
+ global teardown_time
+ if config.args.verbose:
+ print("Timing Results:")
+ print('\t{}{}'.format('{0: <30}'.format("Test"), '{0:
>15}'.format("Time (sec)")))
+ print("\t---------------------------------------------------------")
+ time_str = "{0: 2.2f}".format(setup_time)
+        print('\t{}{}'.format('{0: <30}'.format('setup'), '{0: >15}'.format(time_str)))
+ for test in passed_tests:
+ time_str = "{0: 2.2f}".format(test[1])
+            print('\t{}{}'.format('{0: <30}'.format(test[0]), '{0: >15}'.format(time_str)))
+ for test in failed_tests:
+ time_str = "{0: 2.2f}".format(test[1])
+            print('\t{}{}'.format('{0: <30}'.format(test[0]), '{0: >15}'.format(time_str)))
+ time_str = "{0: 2.2f}".format(teardown_time)
+        print('\t{}{}'.format('{0: <30}'.format('teardown'), '{0: >15}'.format(time_str)))
+
+ total_run_time = setup_time + teardown_time
+ for test in passed_tests:
+ total_run_time += test[1]
+ for test in failed_tests:
+ total_run_time += test[1]
+ total_str = "{0: 5.2f}".format(total_run_time)
+ print("\t---------------------------------------------------------")
+ print('\t{}{}'.format('{0: <30}'.format("Total Run Time"), '{0:
>15}'.format(total_str)))
+
+ return [passed_cnt, failed_cnt, skipped_cnt]
+
+def teardown(config, record_time=False):
+ global teardown_time
+ start_time = time.time()
+ try:
+ config.container.stop()
+ except Exception as e:
+ # log but ignore all exceptions
+ Log.log_debug(e)
+ try:
+ config.container.destroy()
+ except Exception as e:
+ # log but ignore all exceptions
+ Log.log_debug(e)
+
+ if record_time:
+ teardown_time = time.time() - start_time
+
+def main(config):
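+    # exit statuses recognized by the automake test driver: 77 marks
+    # the test as skipped and 99 as a hard error; any other nonzero
+    # status is treated as a failure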
+ AUTOMAKE_SKIPPED = 77
+ AUTOMAKE_HARD_ERROR = 99
+ AUTOMAKE_PASSED = 0
+
+ try:
+ setup(config, record_time=True)
+ [passed_cnt, failed_cnt, skipped_cnt] = run_tests(config)
+ finally:
+ teardown(config, record_time=True)
+
+ if failed_cnt > 0:
+ return failed_cnt
+ if skipped_cnt > 0:
+ return AUTOMAKE_SKIPPED
+ if passed_cnt > 0:
+ return AUTOMAKE_PASSED
+
+ return AUTOMAKE_HARD_ERROR
+
+if __name__ == '__main__':
+ config = parse_args()
+ sys.exit(main(config))
--
1.8.3.1