jenkins-bot has submitted this change and it was merged. ( 
https://gerrit.wikimedia.org/r/338808 )

Change subject: Package metadata and testing tools improvements
......................................................................


Package metadata and testing tools improvements

- fill setup.py with all the parameters, suitable for a future
  submission to PyPI
- autodetect version from Git tags and expose it in the module using
  setuptools_scm
  - add a --version option to the CLI
- use pytest to run the tests
- make tox use the dependencies in setup.py, removing the now
  unnecessary requirements files.
- add security analyzer Bandit to tox.
- add Prospector to tox, which in turn runs multiple additional tools:
  dodgy, mccabe, pep257, pep8, profile-validator, pyflakes, pylint,
  pyroma, vulture.

Bug: T154588
Change-Id: I08214305bc9337267f4cf904b9acf9ecb8d5d488
---
A .coveragerc
M .gitignore
M .travis.yml
M cumin/__init__.py
M cumin/cli.py
A cumin/tests/integration/__init__.py
A cumin/tests/integration/conftest.py
M cumin/tests/integration/test_cli.py
M cumin/tests/integration/transports/clustershell.sh
M cumin/tests/unit/test_cli.py
M cumin/tests/vulture_whitelist.py
A prospector.yaml
A pytest.ini
D requirements-tests.txt
D requirements.txt
A setup.cfg
M setup.py
M tox.ini
18 files changed, 362 insertions(+), 303 deletions(-)

Approvals:
  Giuseppe Lavagetto: Looks good to me, but someone else must approve
  jenkins-bot: Verified
  Volans: Looks good to me, approved



diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..d5b0806
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,3 @@
+[run]
+branch = True
+omit = cumin/tests/*
diff --git a/.gitignore b/.gitignore
index 83769f1..ce3f261 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,12 @@
-/.eggs
-/.tox
-/*.egg-info
+/build/
+/.cache/
 /.coverage
-/setup.cfg
-/.venv
-/logs
+/.coverage-integration-clustershell
+/.eggs/
+/.tox/
+/.venv/
+/dist/
+/logs/
+/*.egg-info/
+__pycache__/
 *.pyc
diff --git a/.travis.yml b/.travis.yml
index e1885f2..e18b5f3 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,13 +2,9 @@
 python: 2.7
 before_install:
   - pip install --upgrade setuptools
-
 install:
-  - pip install --upgrade tox
-  - pip install --upgrade "clustershell==1.7.3" colorama "pyparsing==2.1.10" 
pyyaml requests tqdm
-  - pip install --upgrade mock nose requests-mock
-  - pip install --upgrade coveralls codecov
-script: coverage run --source=cumin --omit="cumin/tests/*" setup.py test
+    - pip install --upgrade coveralls codecov tox
+script: tox -e unit
 after_success:
   - coveralls
   - codecov
diff --git a/cumin/__init__.py b/cumin/__init__.py
index 9d32c90..9e70e4c 100644
--- a/cumin/__init__.py
+++ b/cumin/__init__.py
@@ -1,4 +1,11 @@
 """Automation and orchestration framework written in Python."""
+from pkg_resources import DistributionNotFound, get_distribution
+
+
+try:
+    __version__ = get_distribution(__name__).version
+except DistributionNotFound:
+    pass  # package is not installed
 
 
 class CuminError(Exception):
diff --git a/cumin/cli.py b/cumin/cli.py
index 2f75fa3..ae88a28 100644
--- a/cumin/cli.py
+++ b/cumin/cli.py
@@ -17,7 +17,8 @@
 from ClusterShell.NodeSet import NodeSet
 from tqdm import tqdm
 
-from cumin import CuminError
+import cumin
+
 from cumin.query import QueryBuilder
 from cumin.transport import Transport
 from cumin.transports import Command
@@ -49,7 +50,7 @@
 """
 
 
-class KeyboardInterruptError(CuminError):
+class KeyboardInterruptError(cumin.CuminError):
     """Custom KeyboardInterrupt exception class for the SIGINT signal 
handler."""
 
 
@@ -109,6 +110,7 @@
                               '[optional]'))
     parser.add_argument('--dry-run', action='store_true',
                         help='Do not execute any command, just return the list 
of matching hosts and exit.')
+    parser.add_argument('--version', action='store_true', help='Print current 
version and exit.')
     parser.add_argument('-d', '--debug', action='store_true', help='Set log 
level to DEBUG.')
     parser.add_argument('-i', '--interactive', action='store_true', help='Drop 
into a Python shell with the results.')
     parser.add_argument('hosts', metavar='HOSTS_QUERY', help='Hosts selection 
query')
@@ -140,9 +142,9 @@
 def get_running_user():
     """Ensure it's running as root and that the original user is detected and 
return it."""
     if os.getenv('USER') != 'root':
-        raise CuminError('Insufficient privileges, run with sudo')
+        raise cumin.CuminError('Insufficient privileges, run with sudo')
     if os.getenv('SUDO_USER') in (None, 'root'):
-        raise CuminError('Unable to determine real user, logged in as root?')
+        raise cumin.CuminError('Unable to determine real user, logged in as 
root?')
 
     return os.getenv('SUDO_USER')
 
@@ -181,13 +183,13 @@
         with open(config_file, 'r') as f:
             config = yaml.safe_load(f)
     except IOError as e:
-        raise CuminError('Unable to read configuration file: 
{message}'.format(message=e))
+        raise cumin.CuminError('Unable to read configuration file: 
{message}'.format(message=e))
     except yaml.parser.ParserError as e:
-        raise CuminError("Unable to parse configuration file 
'{config}':\n{message}".format(
+        raise cumin.CuminError("Unable to parse configuration file 
'{config}':\n{message}".format(
             config=config_file, message=e))
 
     if config is None:
-        raise CuminError("Empty configuration found in 
'{config}'".format(config=config_file))
+        raise cumin.CuminError("Empty configuration found in 
'{config}'".format(config=config_file))
 
     return config
 
@@ -270,7 +272,7 @@
     elif not sys.stdout.isatty():  # pylint: disable=no-member
         message = 'Not in a TTY but neither DRY-RUN nor FORCE mode were 
specified.'
         stderr(message)
-        raise CuminError(message)
+        raise cumin.CuminError(message)
 
     for i in xrange(10):
         stderr('Confirm to continue [y/n]?', end=' ')
@@ -368,10 +370,14 @@
     # Setup
     try:
         args = parse_args(argv)
+        if args.version:
+            tqdm.write('cumin {version}'.format(version=cumin.__version__))
+            return 0
+
         user = get_running_user()
         config = parse_config(args.config)
         setup_logging(config['log_file'], debug=args.debug)
-    except CuminError as e:
+    except cumin.CuminError as e:
         stderr(e)
         return 2
     except Exception as e:  # pylint: disable=broad-except
diff --git a/cumin/tests/integration/__init__.py 
b/cumin/tests/integration/__init__.py
new file mode 100644
index 0000000..c210fac
--- /dev/null
+++ b/cumin/tests/integration/__init__.py
@@ -0,0 +1 @@
+"""Integration tests."""
diff --git a/cumin/tests/integration/conftest.py 
b/cumin/tests/integration/conftest.py
new file mode 100644
index 0000000..e88b39e
--- /dev/null
+++ b/cumin/tests/integration/conftest.py
@@ -0,0 +1,12 @@
+"""Pytest customization for integration tests."""
+import pytest
+
+
[email protected](hookwrapper=True)
+def pytest_runtest_makereport(item, call):  # pylint: disable=unused-argument
+    """If a custom variant_params marker is set, print a section with its 
content."""
+    outcome = yield
+    marker = item.get_marker('variant_params')
+    if marker:
+        rep = outcome.get_result()
+        rep.sections.insert(0, ('test_variant parameters', marker.args))
diff --git a/cumin/tests/integration/test_cli.py 
b/cumin/tests/integration/test_cli.py
index 23b6e96..d2c0e84 100644
--- a/cumin/tests/integration/test_cli.py
+++ b/cumin/tests/integration/test_cli.py
@@ -1,14 +1,13 @@
-"""CLI tests."""
+"""CLI integration tests."""
+# pylint: disable=protected-access
 from __future__ import print_function
 
 import copy
 import os
 import re
 import sys
-import unittest
 
-from functools import wraps
-from StringIO import StringIO
+import pytest
 
 from cumin import cli
 
@@ -92,7 +91,7 @@
     {'rc': None, 'additional_params': ['--global-timeout', '1'], 'commands': 
['sleep 0.99', 'date']},
     {'rc': 2, 'additional_params': ['-t', '1'], 'commands': ['sleep 2', 
'date'],
      'assert_false': ['failed', 'global_timeout', 'date_success']},
-    {'rc': 0, 'additional_params': ['-t', '1'], 'commands': ['sleep 0.5', 
'date'],
+    {'rc': 0, 'additional_params': ['-t', '2'], 'commands': ['sleep 0.5', 
'date'],
      'assert_false': ['failed', 'global_timeout']},
 )
 
@@ -113,67 +112,34 @@
 )
 
 
-def capture_output(func):
-    """Decorator to capture stdout and stderr of a test run and pass it to the 
test method.
-
-    Arguments
-    func -- the function to be decorated
-    """
-    @wraps(func)
-    def func_wrapper(self, *args, **kwargs):
-        """The actual sdtout/stderr capturer.
-
-        Arguments
-        self -- the 'self' of the decorated method.
-        """
-        try:
-            stdout = sys.stdout
-            stderr = sys.stderr
-            out = StringIO()
-            err = StringIO()
-            sys.stdout = out
-            sys.stderr = err
-            args += (out, err)
-            func(self, *args, **kwargs)
-        except AssertionError:
-            # Print both stderr and stdout to the original stdout captured by 
nose
-            print(err.getvalue(), file=stdout)
-            print(out.getvalue(), file=stdout)
-            raise
-        finally:
-            # Restore original stdout/stderr
-            sys.stdout = stdout
-            sys.stderr = stderr
-
-    return func_wrapper
-
-
 def make_method(name, commands_set):
     """Method generator with a dynamic name and docstring."""
     params = copy.deepcopy(commands_set)  # Needed to have a different one for 
each method
 
-    @capture_output
-    def test_variant(self, stdout, stderr):
+    @pytest.mark.variant_params(params)
+    def test_variant(self, capsys):
         """Test variant generated function"""
         argv = self.default_params + params['params'] + [self.all_nodes] + 
params['commands']
         rc = cli.main(argv=argv)
-        err = stderr.getvalue()
+        out, err = capsys.readouterr()
+        sys.stdout.write(out)
+        sys.stderr.write(err)
 
         if params['rc'] is None:
-            params['rc'] = self._get_rc(params)
+            params['rc'] = get_rc(params)
 
-        self.assertEqual(rc, params['rc'])
-        self.assertIn(_EXPECTED_LINES['all_targeted'], err, 
msg=_EXPECTED_LINES['all_targeted'])
+        assert rc == params['rc']
+        assert _EXPECTED_LINES['all_targeted'] in err, 
_EXPECTED_LINES['all_targeted']
 
         labels = params.get('assert_true', [])
-        labels += self._get_global_timeout_expected_lines(params)
+        labels += get_global_timeout_expected_lines(params)
 
         if 'async' in params['params']:
             mode = 'async'
         else:
             mode = 'sync'
-            labels += (self._get_ls_expected_lines(params) + 
self._get_date_expected_lines(params) +
-                       self._get_timeout_expected_lines(params))
+            labels += (get_ls_expected_lines(params) + 
get_date_expected_lines(params) +
+                       get_timeout_expected_lines(params))
 
         for label in labels:
             if label in ('all_success', 'all_failure') and '-p' in 
params['params']:
@@ -185,12 +151,12 @@
                 string = _EXPECTED_LINES[label]
 
             if label.endswith('_re'):
-                self.assertIsNotNone(re.search(string, err), msg=string)
+                assert re.search(string, err) is not None, string
             else:
-                self.assertIn(string, err, msg=string)
+                assert string in err, string
 
         for label in params.get('assert_false', []):
-            self.assertNotIn(_EXPECTED_LINES[label], err, 
msg=_EXPECTED_LINES[label])
+            assert _EXPECTED_LINES[label] not in err, _EXPECTED_LINES[label]
 
     # Dynamically set the name and docstring of the generated function to 
distinguish them
     test_variant.__name__ = 'test_variant_{name}'.format(name=name)
@@ -202,6 +168,7 @@
 def add_variants_methods(indexes):
     """Decorator to add generated tests to a TestClass subclass."""
     def func_wrapper(cls):
+        """Dynamic test generator."""
         for i in indexes:
             for j, commands_set in enumerate(_VARIANTS_COMMANDS):
                 commands_set['params'] = _VARIANTS_PARAMETERS[i] + 
commands_set.get('additional_params', [])
@@ -214,16 +181,127 @@
     return func_wrapper
 
 
+def get_rc(params):
+    """Return the expected return code based on the parameters.
+
+    Arguments:
+    params -- a dictionary with all the parameters passed to the 
variant_function
+    """
+    return_value = 2
+    if '-p' in params['params'] and '--global-timeout' not in params['params']:
+        return_value = 1
+
+    return return_value
+
+
+def get_global_timeout_expected_lines(params):  # pylint: disable=invalid-name
+    """Return a list of expected lines labels for global timeout-based tests.
+
+    Arguments:
+    params -- a dictionary with all the parameters passed to the 
variant_function
+    """
+    expected = []
+    if '--global-timeout' not in params['params']:
+        return expected
+
+    if '-p' in params['params']:
+        expected = ['global_timeout_executing_threshold_re', 
'global_timeout_pending_threshold_re']
+    else:
+        expected = ['global_timeout_executing_re', 'global_timeout_pending_re']
+
+    return expected
+
+
+def get_timeout_expected_lines(params):
+    """Return a list of expected lines labels for timeout-based tests.
+
+    Arguments:
+    params -- a dictionary with all the parameters passed to the 
variant_function
+    """
+    expected = []
+    if '-t' not in params['params']:
+        return expected
+
+    if params['rc'] == 0:
+        # Test successful cases
+        if '-p' in params['params']:
+            expected = ['sleep_success_threshold', 'date_success_threshold']
+        else:
+            expected = ['date_success', 'sleep_success']
+    else:
+        # Test timeout cases
+        if '--batch-size' in params['params']:
+            expected = ['sleep_timeout_threshold_re']
+        else:
+            expected = ['sleep_timeout']
+
+    return expected
+
+
+def get_date_expected_lines(params):
+    """Return a list of expected lines labels for the date command based on 
parameters.
+
+    Arguments:
+    params -- a dictionary with all the parameters passed to the 
variant_function
+    """
+    expected = []
+    if 'ls -la /tmp/non_existing' in params['commands']:
+        return expected
+
+    if '-p' in params['params']:
+        if 'ls -la /tmp/maybe' in params['commands']:
+            expected = ['date_success_threshold_partial']
+        elif 'ls -la /tmp' in params['commands']:
+            expected = ['date_success_threshold']
+    elif 'ls -la /tmp' in params['commands']:
+        expected = ['date_success']
+
+    return expected
+
+
+def get_ls_expected_lines(params):
+    """Return a list of expected lines labels for the ls command based on the 
parameters.
+
+    Arguments:
+    params -- a dictionary with all the parameters passed to the 
variant_function
+    """
+    expected = []
+    if 'ls -la /tmp' in params['commands']:
+        if '-p' in params['params']:
+            expected = ['ls_success_threshold']
+        else:
+            expected = ['ls_success']
+    elif 'ls -la /tmp/maybe' in params['commands']:
+        if '-p' in params['params']:
+            expected = ['ls_partial_success', 
'ls_partial_success_threshold_ratio']
+        else:
+            expected = ['ls_partial_success', 'ls_partial_success_ratio_re']
+    elif 'ls -la /tmp/non_existing' in params['commands']:
+        if '--batch-size' in params['params']:
+            if '-p' in params['params']:
+                expected.append('ls_failure_batch_threshold')
+            else:
+                expected.append('ls_failure_batch')
+        else:
+            expected.append('ls_total_failure')
+
+        if '-p' in params['params']:
+            expected.append('ls_total_failure_threshold_ratio')
+        else:
+            expected.append('ls_total_failure_ratio')
+
+    return expected
+
+
 @add_variants_methods(xrange(len(_VARIANTS_PARAMETERS)))
-class TestCLI(unittest.TestCase):
+class TestCLI(object):
     """CLI module tests."""
 
-    _multiprocess_can_split_ = True
-
-    def setUp(self):
+    def setup_method(self, _):
         """Set default properties."""
+        # pylint: disable=attribute-defined-outside-init
         self.identifier = os.getenv('CUMIN_IDENTIFIER')
-        self.assertIsNotNone(self.identifier, msg='Unable to find 
CUMIN_IDENTIFIER environmental variable')
+        assert self.identifier is not None, 'Unable to find CUMIN_IDENTIFIER 
environmental variable'
         self.config = os.path.join(os.getenv('CUMIN_TMPDIR', ''), 
'config.yaml')
         self.default_params = ['--force', '-d', '-c', self.config]
         self.nodes_prefix = '{identifier}-'.format(identifier=self.identifier)
@@ -237,169 +315,64 @@
         """
         if nodes is None:
             return self.all_nodes
-        else:
-            return '{prefix}[{nodes}]'.format(prefix=self.nodes_prefix, 
nodes=nodes)
 
-    def _get_rc(self, params):
-        """Return the expected return code based on the parameters.
+        return '{prefix}[{nodes}]'.format(prefix=self.nodes_prefix, 
nodes=nodes)
 
-        Arguments:
-        params -- a dictionary with all the parameters passed to the 
variant_function
-        """
-        return_value = 2
-        if '-p' in params['params'] and '--global-timeout' not in 
params['params']:
-            return_value = 1
-
-        return return_value
-
-    def _get_global_timeout_expected_lines(self, params):
-        """Return a list of expected lines labels for global timeout-based 
tests.
-
-        Arguments:
-        params -- a dictionary with all the parameters passed to the 
variant_function
-        """
-        expected = []
-        if '--global-timeout' not in params['params']:
-            return expected
-
-        if '-p' in params['params']:
-            expected = ['global_timeout_executing_threshold_re', 
'global_timeout_pending_threshold_re']
-        else:
-            expected = ['global_timeout_executing_re', 
'global_timeout_pending_re']
-
-        return expected
-
-    def _get_timeout_expected_lines(self, params):
-        """Return a list of expected lines labels for timeout-based tests.
-
-        Arguments:
-        params -- a dictionary with all the parameters passed to the 
variant_function
-        """
-        expected = []
-        if '-t' not in params['params']:
-            return expected
-
-        if params['rc'] == 0:
-            # Test successful cases
-            if '-p' in params['params']:
-                expected = ['sleep_success_threshold', 
'date_success_threshold']
-            else:
-                expected = ['date_success', 'sleep_success']
-        else:
-            # Test timeout cases
-            if '--batch-size' in params['params']:
-                expected = ['sleep_timeout_threshold_re']
-            else:
-                expected = ['sleep_timeout']
-
-        return expected
-
-    def _get_date_expected_lines(self, params):
-        """Return a list of expected lines labels for the date command based 
on parameters.
-
-        Arguments:
-        params -- a dictionary with all the parameters passed to the 
variant_function
-        """
-        expected = []
-        if 'ls -la /tmp/non_existing' in params['commands']:
-            return expected
-
-        if '-p' in params['params']:
-            if 'ls -la /tmp/maybe' in params['commands']:
-                expected = ['date_success_threshold_partial']
-            elif 'ls -la /tmp' in params['commands']:
-                expected = ['date_success_threshold']
-        elif 'ls -la /tmp' in params['commands']:
-            expected = ['date_success']
-
-        return expected
-
-    def _get_ls_expected_lines(self, params):
-        """Return a list of expected lines labels for the ls command based on 
the parameters.
-
-        Arguments:
-        params -- a dictionary with all the parameters passed to the 
variant_function
-        """
-        expected = []
-        if 'ls -la /tmp' in params['commands']:
-            if '-p' in params['params']:
-                expected = ['ls_success_threshold']
-            else:
-                expected = ['ls_success']
-        elif 'ls -la /tmp/maybe' in params['commands']:
-            if '-p' in params['params']:
-                expected = ['ls_partial_success', 
'ls_partial_success_threshold_ratio']
-            else:
-                expected = ['ls_partial_success', 
'ls_partial_success_ratio_re']
-        elif 'ls -la /tmp/non_existing' in params['commands']:
-            if '--batch-size' in params['params']:
-                if '-p' in params['params']:
-                    expected.append('ls_failure_batch_threshold')
-                else:
-                    expected.append('ls_failure_batch')
-            else:
-                expected.append('ls_total_failure')
-
-            if '-p' in params['params']:
-                expected.append('ls_total_failure_threshold_ratio')
-            else:
-                expected.append('ls_total_failure_ratio')
-
-        return expected
-
-    @capture_output
-    def test_single_command_subfanout(self, stdout, stderr):
+    def test_single_command_subfanout(self, capsys):
         """Executing one command on a subset of nodes smaller than the 
ClusterShell fanout."""
         params = [self._get_nodes('1-2'), 'date']
         rc = cli.main(argv=self.default_params + params)
-        err = stderr.getvalue()
-        self.assertIn(_EXPECTED_LINES['subfanout_targeted'], err, 
msg=_EXPECTED_LINES['subfanout_targeted'])
-        self.assertIn(_EXPECTED_LINES['date_success_subfanout'], err, 
msg=_EXPECTED_LINES['date_success_subfanout'])
-        self.assertIn(_EXPECTED_LINES['all_success_subfanout'], err, 
msg=_EXPECTED_LINES['all_success_subfanout'])
-        self.assertNotIn(_EXPECTED_LINES['failed'], err, 
msg=_EXPECTED_LINES['failed'])
-        self.assertNotIn(_EXPECTED_LINES['global_timeout'], err, 
msg=_EXPECTED_LINES['global_timeout'])
-        self.assertEqual(rc, 0)
+        out, err = capsys.readouterr()
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+        assert _EXPECTED_LINES['subfanout_targeted'] in err, 
_EXPECTED_LINES['subfanout_targeted']
+        assert _EXPECTED_LINES['date_success_subfanout'] in err, 
_EXPECTED_LINES['date_success_subfanout']
+        assert _EXPECTED_LINES['all_success_subfanout'] in err, 
_EXPECTED_LINES['all_success_subfanout']
+        assert _EXPECTED_LINES['failed'] not in err, _EXPECTED_LINES['failed']
+        assert _EXPECTED_LINES['global_timeout'] not in err, 
_EXPECTED_LINES['global_timeout']
+        assert rc == 0
 
-    @capture_output
-    def test_single_command_supfanout(self, stdout, stderr):
+    def test_single_command_supfanout(self, capsys):
         """Executing one command on a subset of nodes greater than the 
ClusterShell fanout."""
         params = [self.all_nodes, 'date']
         rc = cli.main(argv=self.default_params + params)
-        err = stderr.getvalue()
-        self.assertIn(_EXPECTED_LINES['all_targeted'], err, 
msg=_EXPECTED_LINES['all_targeted'])
-        self.assertIn(_EXPECTED_LINES['date_success'], err, 
msg=_EXPECTED_LINES['date_success'])
-        self.assertIn(_EXPECTED_LINES['all_success'], err, 
msg=_EXPECTED_LINES['all_success'])
-        self.assertNotIn(_EXPECTED_LINES['failed'], err, 
msg=_EXPECTED_LINES['failed'])
-        self.assertNotIn(_EXPECTED_LINES['global_timeout'], err, 
msg=_EXPECTED_LINES['global_timeout'])
-        self.assertEqual(rc, 0)
+        out, err = capsys.readouterr()
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+        assert _EXPECTED_LINES['all_targeted'] in err, 
_EXPECTED_LINES['all_targeted']
+        assert _EXPECTED_LINES['date_success'] in err, 
_EXPECTED_LINES['date_success']
+        assert _EXPECTED_LINES['all_success'] in err, 
_EXPECTED_LINES['all_success']
+        assert _EXPECTED_LINES['failed'] not in err, _EXPECTED_LINES['failed']
+        assert _EXPECTED_LINES['global_timeout'] not in err, 
_EXPECTED_LINES['global_timeout']
+        assert rc == 0
 
-    @capture_output
-    def test_dry_run(self, stdout, stderr):
+    def test_dry_run(self, capsys):
         """With --dry-run only the matching hosts are printed."""
         params = ['--dry-run', self.all_nodes, 'date']
         rc = cli.main(argv=self.default_params + params)
-        err = stderr.getvalue()
-        self.assertIn(_EXPECTED_LINES['all_targeted'], err, 
msg=_EXPECTED_LINES['all_targeted'])
-        self.assertIn(_EXPECTED_LINES['dry_run'], err, 
msg=_EXPECTED_LINES['dry_run'])
-        self.assertNotIn(_EXPECTED_LINES['successfully'], err, 
msg=_EXPECTED_LINES['successfully'])
-        self.assertNotIn(_EXPECTED_LINES['failed'], err, 
msg=_EXPECTED_LINES['failed'])
-        self.assertNotIn(_EXPECTED_LINES['global_timeout'], err, 
msg=_EXPECTED_LINES['global_timeout'])
-        self.assertEqual(rc, 0)
+        out, err = capsys.readouterr()
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+        assert _EXPECTED_LINES['all_targeted'] in err, 
_EXPECTED_LINES['all_targeted']
+        assert _EXPECTED_LINES['dry_run'] in err, _EXPECTED_LINES['dry_run']
+        assert _EXPECTED_LINES['successfully'] not in err, 
_EXPECTED_LINES['successfully']
+        assert _EXPECTED_LINES['failed'] not in err, _EXPECTED_LINES['failed']
+        assert _EXPECTED_LINES['global_timeout'] not in err, 
_EXPECTED_LINES['global_timeout']
+        assert rc == 0
 
-    @capture_output
-    def test_global_timeout(self, stdout, stderr):
-        """With a global timeout shorter than a command it should fail."""
+    def test_timeout(self, capsys):
+        """With a timeout shorter than a command it should fail."""
         params = ['--global-timeout', '1', self.all_nodes, 'sleep 2']
         rc = cli.main(argv=self.default_params + params)
-        err = stderr.getvalue()
-        self.assertIn(_EXPECTED_LINES['all_targeted'], err, 
msg=_EXPECTED_LINES['all_targeted'])
-        self.assertIsNotNone(
-            re.search(_EXPECTED_LINES['global_timeout_executing_re'], err),
-            msg=_EXPECTED_LINES['global_timeout_executing_re'])
-        self.assertIsNotNone(
-            re.search(_EXPECTED_LINES['global_timeout_pending_re'], err),
-            msg=_EXPECTED_LINES['global_timeout_pending_re'])
-        self.assertIn(_EXPECTED_LINES['sleep_total_failure'], err, 
msg=_EXPECTED_LINES['sleep_total_failure'])
-        self.assertIn(_EXPECTED_LINES['all_failure'], err, 
msg=_EXPECTED_LINES['all_failure'])
-        self.assertNotIn(_EXPECTED_LINES['failed'], err, 
msg=_EXPECTED_LINES['failed'])
-        self.assertEqual(rc, 2)
+        out, err = capsys.readouterr()
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+        assert _EXPECTED_LINES['all_targeted'] in err, 
_EXPECTED_LINES['all_targeted']
+        assert re.search(_EXPECTED_LINES['global_timeout_executing_re'], err) 
is not None, \
+            _EXPECTED_LINES['global_timeout_executing_re']
+        assert re.search(_EXPECTED_LINES['global_timeout_pending_re'], err) is 
not None, \
+            _EXPECTED_LINES['global_timeout_pending_re']
+        assert _EXPECTED_LINES['sleep_total_failure'] in err, 
_EXPECTED_LINES['sleep_total_failure']
+        assert _EXPECTED_LINES['all_failure'] in err, 
_EXPECTED_LINES['all_failure']
+        assert _EXPECTED_LINES['failed'] not in err, _EXPECTED_LINES['failed']
+        assert rc == 2
diff --git a/cumin/tests/integration/transports/clustershell.sh 
b/cumin/tests/integration/transports/clustershell.sh
index 10be337..24fe46e 100755
--- a/cumin/tests/integration/transports/clustershell.sh
+++ b/cumin/tests/integration/transports/clustershell.sh
@@ -46,10 +46,5 @@
 
 function run_tests() {
     USER=root SUDO_USER=user cumin --force -c "${CUMIN_TMPDIR}/config.yaml" 
"${CUMIN_IDENTIFIER}-[1-2,5]" "touch /tmp/maybe" > /dev/null 2>&1
-    # Nose multiprocess and coverage don't work together
-    # export NOSE_PROCESS_TIMEOUT=60
-    # export NOSE_PROCESSES=-1
-    export NOSE_IGNORE_CONFIG_FILES=1
-    coverage run --source cumin --omit=cumin/tests/* -m nose 
cumin/tests/integration/test_cli.py
-    coverage report -m
+    py.test -n auto --strict --cov-report term-missing --cov=cumin 
cumin/tests/integration
 }
diff --git a/cumin/tests/unit/test_cli.py b/cumin/tests/unit/test_cli.py
index 29d74a8..4554c18 100644
--- a/cumin/tests/unit/test_cli.py
+++ b/cumin/tests/unit/test_cli.py
@@ -1,13 +1,10 @@
 """CLI tests."""
 
 import os
-import sys
 import tempfile
 import unittest
 
-from functools import wraps
 from logging import DEBUG, INFO
-from StringIO import StringIO
 
 import mock
 
@@ -17,31 +14,6 @@
 _ENV = {'USER': 'root', 'SUDO_USER': 'user'}
 # Command line arguments
 _ARGV = ['-c', 'doc/examples/config.yaml', '-d', '-m', 'sync', 'host', 
'command1', 'command2']
-
-
-def capture_stderr(func):
-    """Decorator to capture stderr while running a test method.
-
-    Arguments
-    func -- the function to be decorated
-    """
-    @wraps(func)
-    def func_wrapper(self):
-        """The actual stderr capturer.
-
-        Arguments
-        self -- the 'self' of the decorated method.
-        """
-        # Mask stderr because ArgumentParser error print directly to stderr 
and nosetest doesn't capture it.
-        stderr = sys.stderr
-        try:
-            err = StringIO()
-            sys.stderr = err
-            func(self)
-        finally:
-            sys.stderr = stderr
-
-    return func_wrapper
 
 
 class TestCLI(unittest.TestCase):
@@ -71,7 +43,6 @@
         args = cli.parse_args(argv=_ARGV[:-2])
         self._validate_parsed_args(args, no_commands=True)
 
-    @capture_stderr
     def test_parse_args_no_mode(self):
         """If mode is not speficied with multiple commands, parsing the args 
should raise a parser error."""
         index = _ARGV.index('-m')
diff --git a/cumin/tests/vulture_whitelist.py b/cumin/tests/vulture_whitelist.py
index 355fecd..255aca2 100644
--- a/cumin/tests/vulture_whitelist.py
+++ b/cumin/tests/vulture_whitelist.py
@@ -14,3 +14,10 @@
 
 whitelist_cli = Whitelist()
 whitelist_cli.run.h
+
+whitelist_tests_integration_conftest = Whitelist()
+whitelist_tests_integration_conftest.pytest_cmdline_preparse
+whitelist_tests_integration_conftest.pytest_runtest_makereport
+
+whitelist_tests_integration_test_cli_TestCLI = Whitelist()
+whitelist_tests_integration_test_cli_TestCLI.setup_method
diff --git a/prospector.yaml b/prospector.yaml
new file mode 100644
index 0000000..cf93a96
--- /dev/null
+++ b/prospector.yaml
@@ -0,0 +1,40 @@
+strictness: high
+inherits:
+  - strictness_high
+
+doc-warnings: true
+member-warnings: true
+test-warnings: true
+
+autodetect: false
+output-format: grouped
+
+pep8:
+  full: true
+  options:
+    max-line-length: 120
+
+pep257:
+  disable:
+    - D203  # 1 blank line required before class docstring, D211 (after) is 
enforce instead
+    - D213  # Multi-line docstring summary should start at the second line, 
D212 (first line) is enforced instead
+
+pylint:
+  disable:
+    - logging-format-interpolation  # format() is used for coherence across 
the project
+  options:
+    ignore: vulture_whitelist.py
+    max-line-length: 120
+    max-args: 6
+    max-attributes: 14
+    max-locals: 16
+    include-naming-hint: true
+    variable-rgx: (([a-z][a-z0-9_]{0,30})|(_[a-z0-9_]*))$
+    variable-name-hint: (([a-z][a-z0-9_]{0,30})|(_[a-z0-9_]*))$
+
+
+pyroma:
+  run: true
+
+vulture:
+  run: true
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..8a2c65b
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+markers =
+    variant_params: test_cli.py variant parameters.
diff --git a/requirements-tests.txt b/requirements-tests.txt
deleted file mode 100644
index 90862ac..0000000
--- a/requirements-tests.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-coverage
-mock
-nose
-requests-mock
-tox
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index f3c4ae1..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-clustershell==1.7.3
-colorama
-pyparsing==2.1.10
-pyyaml
-requests
-tqdm
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..b7e4789
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[aliases]
+test=pytest
diff --git a/setup.py b/setup.py
index 623ae4b..28f71e6 100644
--- a/setup.py
+++ b/setup.py
@@ -3,21 +3,72 @@
 
 from setuptools import find_packages, setup
 
+
+long_description = """
+Cumin provides a flexible and scalable automation framework to execute 
multiple commands on
+multiple targets in parallel.
+It allows you to easily perform complex selections of hosts through a 
user-friendly query language which can interface
+with different backend modules.
+The transport layer can also be selected, providing multiple execution 
strategies.
+It can be used both via its command line interface (CLI) and as a Python 
library.
+"""
+
+install_requires = [
+    'clustershell==1.7.3',
+    'colorama>=0.3.2',
+    'pyparsing==2.1.10',
+    'pyyaml>=3.11',
+    'requests>=2.12.0',
+    'tqdm>=4.11.2',
+]
+
+tests_require = [
+    'bandit>=0.13.2',
+    'flake8>=3.2.1',
+    'mock>=2.0.0',
+    'pytest>=3.0.3',
+    'pytest-cov>=1.8.0',
+    'pytest-xdist>=1.15.0',
+    'requests-mock>=0.7.0',
+    'tox>=2.5.0',
+]
+
 setup(
-    name='cumin',
-    version='0.0.2',
-    description='Cumin - An automation and orchestration framework',
     author='Riccardo Coccioli',
     author_email='[email protected]',
-    url='https://github.com/wikimedia/operations-software-cumin',
-    install_requires=['clustershell==1.7.3', 'colorama', 'pyparsing==2.1.10', 
'pyyaml', 'requests', 'tqdm'],
-    test_suite='nose.collector',
-    tests_require=['mock', 'nose', 'requests-mock'],
-    zip_safe=False,
-    packages=find_packages(),
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Environment :: Console',
+        'Intended Audience :: System Administrators',
+        'License :: OSI Approved :: GNU General Public License v3 or later 
(GPLv3+)',
+        'Operating System :: MacOS :: MacOS X',
+        'Operating System :: POSIX :: BSD',
+        'Operating System :: POSIX :: Linux',
+        'Programming Language :: Python :: 2 :: Only',
+        'Programming Language :: Python :: 2.7',
+        'Topic :: Software Development :: Libraries :: Python Modules',
+        'Topic :: System :: Clustering',
+        'Topic :: System :: Distributed Computing',
+        'Topic :: System :: Systems Administration',
+    ],
+    data_files=['doc/examples/config.yaml'],
+    description='Automation and orchestration framework written in Python',
     entry_points={
         'console_scripts': [
             'cumin = cumin.cli:main',
         ],
     },
+    extras_require={'tests': tests_require + 
['prospector[with_everything]>=0.12.4']},
+    install_requires=install_requires,
+    keywords=['cumin', 'automation framework', 'orchestration framework'],
+    license='GPLv3+',
+    long_description=long_description,
+    name='cumin',
+    packages=find_packages(exclude=['*.tests', '*.tests.*']),
+    platforms=['GNU/Linux', 'BSD', 'MacOSX'],
+    setup_requires=['pytest-runner>=2.7.1', 'setuptools_scm>=1.15.0'],
+    tests_require=tests_require,
+    url='https://github.com/wikimedia/cumin',
+    use_scm_version=True,
+    zip_safe=False,
 )
diff --git a/tox.ini b/tox.ini
index f6d978f..20c044c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,29 +1,28 @@
 [tox]
 minversion = 1.6
-skipsdist = True
-envlist = flake8, py27
-
-[testenv:venv]
-commands = {posargs}
+envlist = flake8, unit, bandit, prospector
 
 [testenv]
 usedevelop = True
-commands =
-  coverage run --source cumin --omit=cumin/tests/* setup.py test
-  coverage report -m
-deps =
-  -rrequirements.txt
-  -rrequirements-tests.txt
-
-[testenv:flake8]
 basepython = python2.7
-commands = flake8 cumin/
-deps = flake8
+commands =
+    flake8: flake8
+    unit: py.test --strict --cov-report term-missing --cov=cumin 
cumin/tests/unit {posargs}
+    # Avoid bandit assert_used (B101) in tests
+    bandit: bandit -l -i -r --exclude cumin/tests cumin/
+    bandit: bandit -l -i -r --skip B101 cumin/tests
+    prospector: prospector --profile "{toxinidir}/prospector.yaml" cumin/
+deps =
+    # Use install_requires and extras_require['tests'] from setup.py
+    .[tests]
+
+[testenv:integration]
+setenv =
+    USER=root
+    SUDO_USER=integration-tests
+commands =
+    "{toxinidir}/cumin/tests/integration/docker.sh" "transports/clustershell"
 
 [flake8]
-max-line-length=120
+max-line-length = 120
 statistics = True
-
-[testenv:integration-clustershell]
-basepython = python2.7
-commands = {toxinidir}/cumin/tests/integration/docker.sh 
transports/clustershell

-- 
To view, visit https://gerrit.wikimedia.org/r/338808
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: merged
Gerrit-Change-Id: I08214305bc9337267f4cf904b9acf9ecb8d5d488
Gerrit-PatchSet: 11
Gerrit-Project: operations/software/cumin
Gerrit-Branch: master
Gerrit-Owner: Volans <[email protected]>
Gerrit-Reviewer: Faidon Liambotis <[email protected]>
Gerrit-Reviewer: Gehel <[email protected]>
Gerrit-Reviewer: Giuseppe Lavagetto <[email protected]>
Gerrit-Reviewer: Volans <[email protected]>
Gerrit-Reviewer: jenkins-bot <>

_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to