Volans has uploaded a new change for review. ( 
https://gerrit.wikimedia.org/r/339834 )

Change subject: Make docstring pep257 compliant
......................................................................

Make docstring pep257 compliant

Bug: T158967
Change-Id: Ia0ae7dd9ffa08e7bf3ed7b40b75176400249a959
---
M cumin/__init__.py
M cumin/backends/__init__.py
M cumin/backends/direct.py
M cumin/backends/puppetdb.py
M cumin/cli.py
M cumin/grammar.py
M cumin/query.py
M cumin/tests/__init__.py
M cumin/tests/unit/__init__.py
M cumin/tests/unit/backends/__init__.py
M cumin/tests/unit/backends/test_direct.py
M cumin/tests/unit/backends/test_puppetdb.py
M cumin/tests/unit/test_backends.py
M cumin/tests/unit/test_cli.py
M cumin/tests/unit/test_grammar.py
M cumin/tests/unit/test_query.py
M cumin/tests/unit/test_transport.py
M cumin/tests/unit/test_transports.py
M cumin/tests/unit/transports/__init__.py
M cumin/tests/unit/transports/test_clustershell.py
M cumin/transport.py
M cumin/transports/__init__.py
M cumin/transports/clustershell.py
23 files changed, 388 insertions(+), 391 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/operations/software/cumin 
refs/changes/34/339834/1

diff --git a/cumin/__init__.py b/cumin/__init__.py
index e69de29..3d71781 100644
--- a/cumin/__init__.py
+++ b/cumin/__init__.py
@@ -0,0 +1 @@
+"""Automation and orchestration framework written in Python."""
diff --git a/cumin/backends/__init__.py b/cumin/backends/__init__.py
index ee5dc97..bbf14db 100644
--- a/cumin/backends/__init__.py
+++ b/cumin/backends/__init__.py
@@ -1,4 +1,4 @@
-"""Abstract backend"""
+"""Abstract backend."""
 
 import logging
 
@@ -6,61 +6,61 @@
 
 
 class InvalidQueryError(Exception):
-    """Custom exception class for invalid queries"""
+    """Custom exception class for invalid queries."""
 
 
 class BaseQuery(object):
-    """Query Builder interface"""
+    """Query Builder interface."""
 
     __metaclass__ = ABCMeta
 
     def __init__(self, config, logger=None):
-        """ Query Builder constructor
+        """Query Builder constructor.
 
-            Arguments:
-            config -- a dictionary with the parsed configuration file
-            logger -- an optional logger instance [optional, default: None]
+        Arguments:
+        config -- a dictionary with the parsed configuration file
+        logger -- an optional logger instance [optional, default: None]
         """
         self.config = config
         self.logger = logger or logging.getLogger(__name__)
 
     @abstractmethod
     def add_category(self, category, key, value=None, operator='=', neg=False):
-        """ Add a category token to the query: F:key = value
+        """Add a category token to the query 'F:key = value'.
 
-            Arguments:
-            category -- the category of the token, one of 
cumin.grammar.categories
-            key      -- the key for this category
-            value    -- the value to match, if not specified the key itself 
will be matched [optional, default: None]
-            operator -- the comparison operator to use, one of 
cumin.grammar.operators [optional: default: =]
-            neg      -- whether the token must be negated [optional, default: 
False]
+        Arguments:
+        category -- the category of the token, one of cumin.grammar.categories
+        key      -- the key for this category
+        value    -- the value to match, if not specified the key itself will 
be matched [optional, default: None]
+        operator -- the comparison operator to use, one of 
cumin.grammar.operators [optional: default: =]
+        neg      -- whether the token must be negated [optional, default: 
False]
         """
 
     @abstractmethod
     def add_hosts(self, hosts, neg=False):
-        """ Add a list of hosts to the query
+        """Add a list of hosts to the query.
 
-            Arguments:
-            hosts -- a list of hosts to match
-            neg   -- whether the token must be negated [optional, default: 
False]
+        Arguments:
+        hosts -- a list of hosts to match
+        neg   -- whether the token must be negated [optional, default: False]
         """
 
     @abstractmethod
     def open_subgroup(self):
-        """Open a subgroup in the query"""
+        """Open a subgroup in the query."""
 
     @abstractmethod
     def close_subgroup(self):
-        """Close a subgroup in the query"""
+        """Close a subgroup in the query."""
 
     @abstractmethod
     def add_and(self):
-        """Add an AND query block to the query"""
+        """Add an AND query block to the query."""
 
     @abstractmethod
     def add_or(self):
-        """Add an OR query block to the query"""
+        """Add an OR query block to the query."""
 
     @abstractmethod
     def execute(self):
-        """Execute the query and return the list of FQDN hostnames that 
matches"""
+        """Execute the query and return the list of FQDN hostnames that 
matches."""
diff --git a/cumin/backends/direct.py b/cumin/backends/direct.py
index 62e6c59..40fec5e 100644
--- a/cumin/backends/direct.py
+++ b/cumin/backends/direct.py
@@ -1,4 +1,4 @@
-"""Direct backend"""
+"""Direct backend."""
 
 from ClusterShell.NodeSet import NodeSet
 
@@ -6,30 +6,30 @@
 
 
 class DirectQuery(BaseQuery):
-    """ DirectQuery query builder
+    """DirectQuery query builder.
 
-        The 'direct' backend allow to use Cumin without any external 
dependency for the hosts selection.
-        It implements only the add_hosts() method and allow only for hosts 
expansion based on the powerful ClusterShell
-        NodeSet syntax, see 
https://clustershell.readthedocs.io/en/latest/api/NodeSet.html
+    The 'direct' backend allows using Cumin without any external dependency 
for the hosts selection.
+    It implements only the add_hosts() method and allows only hosts 
expansion based on the powerful ClusterShell
+    NodeSet syntax, see 
https://clustershell.readthedocs.io/en/latest/api/NodeSet.html
 
-        The typical usage for the 'direct' backend is as a reliable 
alternative in cases in which the primary host
-        selection mechanism is not working and for testing the transports 
without any external backend dependency.
+    The typical usage for the 'direct' backend is as a reliable alternative in 
cases in which the primary host
+    selection mechanism is not working and for testing the transports without 
any external backend dependency.
     """
 
     def __init__(self, config, logger=None):
-        """ Query Builder constructor
+        """Query Builder constructor.
 
-            Arguments: according to BaseQuery interface
+        Arguments: according to BaseQuery interface
         """
         super(DirectQuery, self).__init__(config, logger)
         self.hosts = NodeSet()
 
     def add_category(self, category, key, value=None, operator='=', neg=False):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         raise InvalidQueryError("Category tokens are not supported by the 
DirectQuery backend")
 
     def add_hosts(self, hosts, neg=False):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         if any(host for host in hosts if '*' in host):
             raise InvalidQueryError("Hosts globbing is not supported by the 
DirectQuery backend")
 
@@ -39,23 +39,23 @@
             self.hosts.update(hosts)
 
     def open_subgroup(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         raise InvalidQueryError("Subgroups are not supported by the 
DirectQuery backend")
 
     def close_subgroup(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         raise InvalidQueryError("Subgroups are not supported by the 
DirectQuery backend")
 
     def add_and(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         raise InvalidQueryError("Boolean AND operator is not supported by the 
DirectQuery backend")
 
     def add_or(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         pass  # Nothing to do, all hosts are added to the same NodeSet
 
     def execute(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         return list(self.hosts)
 
 
diff --git a/cumin/backends/puppetdb.py b/cumin/backends/puppetdb.py
index 9a8e4d2..6eca0f0 100644
--- a/cumin/backends/puppetdb.py
+++ b/cumin/backends/puppetdb.py
@@ -1,4 +1,4 @@
-"""PuppetDB backend"""
+"""PuppetDB backend."""
 
 import requests
 
@@ -8,10 +8,10 @@
 
 
 class PuppetDBQuery(BaseQuery):
-    """ PuppetDB query builder
+    """PuppetDB query builder.
 
-        The 'direct' backend allow to use an existing PuppetDB instance for 
the hosts selection.
-        At the moment only PuppetDB v3 API are implemented.
+    The 'puppetdb' backend allows using an existing PuppetDB instance for the 
hosts selection.
+    At the moment only the PuppetDB v3 API is implemented.
     """
 
     base_url_template = 'https://{host}:{port}/v3/'
@@ -19,9 +19,9 @@
     hosts_keys = {'R': 'certname', 'F': 'name'}
 
     def __init__(self, config, logger=None):
-        """ Query Builder constructor
+        """Query Builder constructor.
 
-            Arguments: according to BaseQuery interface
+        Arguments: according to BaseQuery interface
         """
         super(PuppetDBQuery, self).__init__(config, logger)
         self.grouped_tokens = {'parent': None, 'bool': None, 'tokens': []}
@@ -37,15 +37,15 @@
 
     @property
     def category(self):
-        """Getter for the property category with a default value"""
+        """Getter for the property category with a default value."""
         return self._category or 'F'
 
     @category.setter
     def category(self, value):
-        """ Setter for the property category with validation
+        """Setter for the property category with validation.
 
-            Arguments:
-            value -- the value to set the category to
+        Arguments:
+        value -- the value to set the category to
         """
         if value not in self.endpoints.keys():
             raise RuntimeError("Invalid value '{category}' for category 
property".format(category=value))
@@ -55,7 +55,7 @@
         self._category = value
 
     def add_category(self, category, key, value=None, operator='=', neg=False):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         self.category = category
         if operator == '~':
             value = value.replace(r'\\', r'\\\\')  # Required by PuppetDB API
@@ -73,7 +73,7 @@
         self.current_group['tokens'].append(query)
 
     def add_hosts(self, hosts, neg=False):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         if len(hosts) == 0:
             return
 
@@ -94,37 +94,37 @@
         self.current_group['tokens'].append(query)
 
     def open_subgroup(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         token = {'parent': self.current_group, 'bool': None, 'tokens': []}
         self.current_group['tokens'].append(token)
         self.current_group = token
 
     def close_subgroup(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         self.current_group = self.current_group['parent']
 
     def add_and(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         self._add_bool('and')
 
     def add_or(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         self._add_bool('or')
 
     def execute(self):
-        """Required by BaseQuery"""
+        """Required by BaseQuery."""
         query = 
self._get_query_string(group=self.grouped_tokens).format(host_key=self.hosts_keys[self.category])
         hosts = self._execute(query, self.endpoints[self.category])
 
         return {host[self.hosts_keys[self.category]] for host in hosts}  # Set 
comprehension
 
     def _get_resource_query(self, key, value=None, operator='='):
-        """ Build a resource query based on the parameters, resolving the 
special cases for %params and @field
+        """Build a resource query based on the parameters, resolving the 
special cases for %params and @field.
 
-            Arguments:
-            key      -- the key of the resource
-            value    -- the value to match, if not specified the key itself 
will be matched [optional, default: None]
-            operator -- the comparison operator to use, one of 
cumin.grammar.operators [optional: default: =]
+        Arguments:
+        key      -- the key of the resource
+        value    -- the value to match, if not specified the key itself will 
be matched [optional, default: None]
+        operator -- the comparison operator to use, one of 
cumin.grammar.operators [optional: default: =]
         """
         if all(char in key for char in ('%', '@')):
             raise RuntimeError(("Resource key cannot contain both '%' (query a 
resource's parameter) and '@' (query a "
@@ -153,10 +153,10 @@
         return query
 
     def _get_query_string(self, group):
-        """ Recursively build and return the PuppetDB query string
+        """Recursively build and return the PuppetDB query string.
 
-            Arguments:
-            group -- a dictionary with the grouped tokens
+        Arguments:
+        group -- a dictionary with the grouped tokens
         """
         if group['bool']:
             query = '["{bool}", '.format(bool=group['bool'])
@@ -179,10 +179,10 @@
         return query
 
     def _add_bool(self, bool_op):
-        """ Add a boolean AND or OR query block to the query and validate logic
+        """Add a boolean AND or OR query block to the query and validate logic.
 
-            Arguments:
-            bool_op -- the boolean operator (and|or) to add to the query
+        Arguments:
+        bool_op -- the boolean operator (and|or) to add to the query
         """
         if self.current_group['bool'] is None:
             self.current_group['bool'] = bool_op
@@ -191,11 +191,11 @@
                 bool=bool_op, current=self.current_group['bool']))
 
     def _execute(self, query, endpoint):
-        """ Execute a query to PuppetDB API and return the parsed JSON
+        """Execute a query to PuppetDB API and return the parsed JSON.
 
-            Arguments:
-            query    -- the query parameter to send to the PuppetDB API
-            endpoint -- the endpoint of the PuppetDB API to call
+        Arguments:
+        query    -- the query parameter to send to the PuppetDB API
+        endpoint -- the endpoint of the PuppetDB API to call
         """
         self.logger.debug('Querying puppetdb: {query}'.format(query=query))
         resources = requests.get(self.url + endpoint, params={'query': query}, 
verify=True)
diff --git a/cumin/cli.py b/cumin/cli.py
index 3d7a33b..139daed 100644
--- a/cumin/cli.py
+++ b/cumin/cli.py
@@ -1,7 +1,5 @@
 #!/usr/bin/python2
-"""
-Cumin CLI entry point
-"""
+"""Cumin CLI entry point."""
 
 import argparse
 import logging
@@ -25,14 +23,14 @@
 
 
 class KeyboardInterruptError(Exception):
-    """Custom KeyboardInterrupt exception class for the SIGINT signal 
handler"""
+    """Custom KeyboardInterrupt exception class for the SIGINT signal 
handler."""
 
 
 def parse_args(argv=None):
-    """ Parse command line arguments and return them
+    """Parse command line arguments and return them.
 
-        Arguments:
-        argv -- the list of arguments to use. If None, the command line ones 
are used [optional, default: None]
+    Arguments:
+    argv -- the list of arguments to use. If None, the command line ones are 
used [optional, default: None]
     """
     sync_mode = 'sync'
     async_mode = 'async'
@@ -99,7 +97,7 @@
 
 
 def get_running_user():
-    """Ensure it's running as root and that the original user is detected and 
return it"""
+    """Ensure it's running as root and that the original user is detected and 
return it."""
     if os.getenv('USER') != 'root':
         raise RuntimeError('Unsufficient privileges, run with sudo')
     if os.getenv('SUDO_USER') in (None, 'root'):
@@ -109,14 +107,13 @@
 
 
 def setup_logging(user, filename, debug=False):
-    """ Setup the logger instance
+    """Set up the logger instance.
 
-        Arguments:
-        user     -- the real user to use in the logging formatter for auditing
-        filename -- the filename of the log file
-        debug    -- whether to set logging level to DEBUG [optional, default: 
False]
+    Arguments:
+    user     -- the real user to use in the logging formatter for auditing
+    filename -- the filename of the log file
+    debug    -- whether to set logging level to DEBUG [optional, default: 
False]
     """
-
     file_path = os.path.dirname(filename)
     if not os.path.exists(file_path):
         os.makedirs(file_path, 0770)
@@ -136,10 +133,10 @@
 
 
 def parse_config(config_file):
-    """ Parse the YAML configuration file
+    """Parse the YAML configuration file.
 
-        Arguments:
-        config_file -- the path of the configuration file to load
+    Arguments:
+    config_file -- the path of the configuration file to load
     """
     with open(config_file, 'r') as f:
         config = yaml.safe_load(f)
@@ -148,11 +145,11 @@
 
 
 def sigint_handler(*args):
-    """ Signal handler for Ctrl+c / SIGINT, raises KeyboardInterruptError
+    """Signal handler for Ctrl+c / SIGINT, raises KeyboardInterruptError.
 
-        Arguments (as defined in 
https://docs.python.org/2/library/signal.html):
-        signum -- the signal number
-        frame  -- the current stack frame
+    Arguments (as defined in https://docs.python.org/2/library/signal.html):
+    signum -- the signal number
+    frame  -- the current stack frame
     """
     if not sys.stdout.isatty():
         logger.warning('Execution interrupted by Ctrl+c/SIGINT')
@@ -189,22 +186,22 @@
 
 
 def stderr(message, end='\n'):
-    """ Print a message to stderr and flush
+    r"""Print a message to stderr and flush.
 
-        Arguments:
-        message -- the message to print to sys.stderr
-        end     -- the character to use at the end of the message. [optional, 
default: \n]
+    Arguments:
+    message -- the message to print to sys.stderr
+    end     -- the character to use at the end of the message. [optional, 
default: \n]
     """
     tqdm.write('{color}{message}{reset}'.format(
         color=colorama.Fore.YELLOW, message=message, 
reset=colorama.Style.RESET_ALL), file=sys.stderr, end=end)
 
 
 def get_hosts(args, config):
-    """ Resolve the hosts selection into a list of hosts and return it. Raises 
KeyboardInterruptError
+    """Resolve the hosts selection into a list of hosts and return it. Raises 
KeyboardInterruptError.
 
-        Arguments:
-        args   -- ArgumentParser instance with parsed command line arguments
-        config -- a dictionary with the parsed configuration file
+    Arguments:
+    args   -- ArgumentParser instance with parsed command line arguments
+    config -- a dictionary with the parsed configuration file
     """
     query = QueryBuilder(args.hosts, config, logger).build()
     hosts = query.execute()
@@ -242,11 +239,11 @@
 
 
 def run(args, config):
-    """ Execute the commands on the selected hosts and print the results
+    """Execute the commands on the selected hosts and print the results.
 
-        Arguments:
-        args   -- ArgumentParser instance with parsed command line arguments
-        config -- a dictionary with the parsed configuration file
+    Arguments:
+    args   -- ArgumentParser instance with parsed command line arguments
+    config -- a dictionary with the parsed configuration file
     """
     hosts = get_hosts(args, config)
     if len(hosts) == 0:
@@ -258,10 +255,10 @@
 
 
 def main(argv=None):
-    """ CLI entry point. Execute commands on hosts according to arguments
+    """CLI entry point. Execute commands on hosts according to arguments.
 
-        Arguments:
-        argv -- the list of arguments to use. If None, the command line ones 
are used [optional, default: None]
+    Arguments:
+    argv -- the list of arguments to use. If None, the command line ones are 
used [optional, default: None]
     """
     signal.signal(signal.SIGINT, sigint_handler)
     colorama.init()
diff --git a/cumin/grammar.py b/cumin/grammar.py
index 8cebf45..c5dbb0c 100644
--- a/cumin/grammar.py
+++ b/cumin/grammar.py
@@ -1,4 +1,4 @@
-"""Query grammar definition"""
+"""Query grammar definition."""
 
 import pyparsing as pp
 
@@ -12,26 +12,25 @@
 
 
 def _grammar():
-    """ Define the query grammar
+    """Define the query grammar.
 
-        Some query examples:
-        - All hosts: *
-        - Hosts globbing: host10*
-        - ClusterShell syntax for hosts expansion: 
host10[10-42].domain,host2010.other-domain
-        - Category based key-value selection: F:key = value
-        - A complex selection:
-          host10[10-42].*.domain or (not F:key1 = value1 and host10*) or 
(F:key2 > value2 and F:key3 ~ '[v]alue[0-9]+')
+    Some query examples:
+    - All hosts: *
+    - Hosts globbing: host10*
+    - ClusterShell syntax for hosts expansion: 
host10[10-42].domain,host2010.other-domain
+    - Category based key-value selection: F:key = value
+    - A complex selection:
+      host10[10-42].*.domain or (not F:key1 = value1 and host10*) or (F:key2 > 
value2 and F:key3 ~ '[v]alue[0-9]+')
 
-        Backus-Naur form (BNF) of the grammar:
-                  <query> ::= <item> | <item> <and_or> <query>
-                   <item> ::= [<neg>] <query-token> | [<neg>] "(" <query> ")"
-            <query-token> ::= <token> | <hosts>
-                  <token> ::= <category>:<key> [<operator> <value>]
+    Backus-Naur form (BNF) of the grammar:
+              <query> ::= <item> | <item> <and_or> <query>
+               <item> ::= [<neg>] <query-token> | [<neg>] "(" <query> ")"
+        <query-token> ::= <token> | <hosts>
+              <token> ::= <category>:<key> [<operator> <value>]
 
-        Given that the pyparsing library defines the grammar in a BNF-like 
style, for the details of the tokens not
-        specified above check directly the code.
+    Given that the pyparsing library defines the grammar in a BNF-like style, 
for the details of the tokens not
+    specified above check directly the code.
     """
-
     # Boolean operators
     and_or = (pp.Keyword('and', caseless=True) | pp.Keyword('or', 
caseless=True))('bool')
     neg = pp.Keyword('not', caseless=True)('neg')  # 'neg' is used to allow 
the use of dot notation, 'not' is reserved
diff --git a/cumin/query.py b/cumin/query.py
index 08c0ee9..9614382 100644
--- a/cumin/query.py
+++ b/cumin/query.py
@@ -1,4 +1,4 @@
-"""Query handling: factory and builder"""
+"""Query handling: factory and builder."""
 
 import importlib
 import logging
@@ -10,15 +10,15 @@
 
 
 class Query(object):
-    """Query factory class"""
+    """Query factory class."""
 
     @staticmethod
     def new(config, logger=None):
-        """ Return an instance of the query class for the configured backend
+        """Return an instance of the query class for the configured backend.
 
-            Arguments:
-            config - the configuration dictionary
-            logger - an optional logging instance [optional, default: None]
+        Arguments:
+        config - the configuration dictionary
+        logger - an optional logging instance [optional, default: None]
         """
         try:
             module = 
importlib.import_module('cumin.backends.{backend}'.format(backend=config['backend']))
@@ -29,18 +29,18 @@
 
 
 class QueryBuilder(object):
-    """ Query builder class
+    """Query builder class.
 
-        Parse a given query string and converts it into a query object for the 
configured backend
+    Parse a given query string and convert it into a query object for the 
configured backend.
     """
 
     def __init__(self, query_string, config, logger=None):
-        """ Query builder constructor
+        """Query builder constructor.
 
-            Arguments:
-            query_string -- the query string to be parsed and passed to the 
query builder
-            config       -- the configuration dictionary
-            logger       -- an optional logging instance [optional, default: 
None]
+        Arguments:
+        query_string -- the query string to be parsed and passed to the query 
builder
+        config       -- the configuration dictionary
+        logger       -- an optional logging instance [optional, default: None]
         """
         self.logger = logger or logging.getLogger(__name__)
         self.query_string = query_string.strip()
@@ -48,7 +48,7 @@
         self.level = 0  # Nesting level for sub-groups
 
     def build(self):
-        """Parse the query string according to the grammar and build the query 
object for the configured backend"""
+        """Parse the query string according to the grammar and build the query 
object for the configured backend."""
         parsed = grammar.parseString(self.query_string, parseAll=True)
         for token in parsed:
             self._parse_token(token)
@@ -56,11 +56,11 @@
         return self.query
 
     def _parse_token(self, token, level=0):
-        """ Recursively interpret the tokens returned by the grammar parsing
+        """Recursively interpret the tokens returned by the grammar parsing.
 
-            Arguments:
-            token -- a single token returned by the grammar parsing
-            level -- Nesting level in case of sub-groups in the query 
[optional, default: 0]
+        Arguments:
+        token -- a single token returned by the grammar parsing
+        level -- Nesting level in case of sub-groups in the query [optional, 
default: 0]
         """
         if not isinstance(token, ParseResults):
             raise RuntimeError("Invalid query string syntax '{query}'. Token 
is '{token}'".format(
@@ -74,11 +74,11 @@
             self._build_token(token_dict, level)
 
     def _build_token(self, token_dict, level):
-        """ Buld a token into the query object for the configured backend
+        """Build a token into the query object for the configured backend.
 
-            Arguments:
-            token_dict -- the dictionary of the parsed token returned by the 
grammar parsing
-            level      -- Nesting level in the query
+        Arguments:
+        token_dict -- the dictionary of the parsed token returned by the 
grammar parsing
+        level      -- Nesting level in the query
         """
         keys = token_dict.keys()
 
diff --git a/cumin/tests/__init__.py b/cumin/tests/__init__.py
index 93310cd..e3d937b 100644
--- a/cumin/tests/__init__.py
+++ b/cumin/tests/__init__.py
@@ -1,4 +1,4 @@
-"""Tests utils"""
+"""Tests utils."""
 
 import logging
 import os
@@ -8,11 +8,11 @@
 
 
 def get_fixture(filename, as_string=False):
-    """ Return the content of a fixture file
+    """Return the content of a fixture file.
 
-        Arguments:
-        filename  -- the file to be opened in the test's fixture directory
-        as_string -- return the content as a multiline string instead of a 
list of lines [optional, default: False]
+    Arguments:
+    filename  -- the file to be opened in the test's fixture directory
+    as_string -- return the content as a multiline string instead of a list of 
lines [optional, default: False]
     """
     with open(os.path.join(_tests_base_path, 'fixtures', filename)) as f:
         if as_string:
diff --git a/cumin/tests/unit/__init__.py b/cumin/tests/unit/__init__.py
index e69de29..e0310a0 100644
--- a/cumin/tests/unit/__init__.py
+++ b/cumin/tests/unit/__init__.py
@@ -0,0 +1 @@
+"""Unit tests."""
diff --git a/cumin/tests/unit/backends/__init__.py 
b/cumin/tests/unit/backends/__init__.py
index e69de29..7a48c9b 100644
--- a/cumin/tests/unit/backends/__init__.py
+++ b/cumin/tests/unit/backends/__init__.py
@@ -0,0 +1 @@
+"""Backend specific tests."""
diff --git a/cumin/tests/unit/backends/test_direct.py 
b/cumin/tests/unit/backends/test_direct.py
index a217569..97e9506 100644
--- a/cumin/tests/unit/backends/test_direct.py
+++ b/cumin/tests/unit/backends/test_direct.py
@@ -1,4 +1,4 @@
-"""Direct backend tests"""
+"""Direct backend tests."""
 import unittest
 
 from ClusterShell.NodeSet import NodeSet
@@ -7,33 +7,33 @@
 
 
 class TestDirectQueryClass(unittest.TestCase):
-    """Direct backend query_class test class"""
+    """Direct backend query_class test class."""
 
     def test_query_class(self):
-        """An instance of query_class should be an instance of BaseQuery"""
+        """An instance of query_class should be an instance of BaseQuery."""
         query = direct.query_class({})
         self.assertIsInstance(query, BaseQuery)
 
 
 class TestDirectQuery(unittest.TestCase):
-    """Direct backend query test class"""
+    """Direct backend query test class."""
 
     def setUp(self):
-        """Setup an instace of DirectQuery for each test"""
+        """Set up an instance of DirectQuery for each test."""
         self.query = direct.DirectQuery({})
 
     def test_instantiation(self):
-        """An instance of DirectQuery should be an instance of BaseQuery"""
+        """An instance of DirectQuery should be an instance of BaseQuery."""
         self.assertIsInstance(self.query, BaseQuery)
         self.assertDictEqual(self.query.config, {})
 
     def test_add_category_fact(self):
-        """Calling add_category() should raise InvalidQueryError"""
+        """Calling add_category() should raise InvalidQueryError."""
         with self.assertRaisesRegexp(InvalidQueryError, r"Category tokens are 
not supported"):
             self.query.add_category('F', 'key', 'value')
 
     def test_add_hosts(self):
-        """Calling add_hosts() should add the hosts to the NodeSet"""
+        """Calling add_hosts() should add the hosts to the NodeSet."""
         self.assertListEqual(list(self.query.hosts), [])
         # No hosts
         self.query.add_hosts(NodeSet.fromlist([]))
@@ -52,28 +52,28 @@
             self.query.add_hosts(NodeSet.fromlist(['host1*']))
 
     def test_open_subgroup(self):
-        """Calling open_subgroup() should raise InvalidQueryError"""
+        """Calling open_subgroup() should raise InvalidQueryError."""
         with self.assertRaisesRegexp(InvalidQueryError, r"Subgroups are not 
supported"):
             self.query.open_subgroup()
 
     def test_close_subgroup(self):
-        """Calling close_subgroup() should raise InvalidQueryError"""
+        """Calling close_subgroup() should raise InvalidQueryError."""
         with self.assertRaisesRegexp(InvalidQueryError, r"Subgroups are not 
supported"):
             self.query.close_subgroup()
 
     def test_add_and(self):
-        """Calling add_and() should raise InvalidQueryError"""
+        """Calling add_and() should raise InvalidQueryError."""
         with self.assertRaisesRegexp(InvalidQueryError, r"Boolean AND operator 
is not supported"):
             self.query.add_and()
 
     def test_add_or(self):
-        """Calling add_or() should be a noop"""
+        """Calling add_or() should be a noop."""
         self.assertListEqual(list(self.query.hosts), [])
         self.query.add_or()
         self.assertListEqual(list(self.query.hosts), [])
 
     def test_execute(self):
-        """Calling execute() should return the list of hosts"""
+        """Calling execute() should return the list of hosts."""
         self.assertListEqual(list(self.query.hosts), self.query.execute())
         self.query.add_hosts(NodeSet.fromlist(['host1', 'host2']))
         self.assertListEqual(list(self.query.hosts), self.query.execute())
diff --git a/cumin/tests/unit/backends/test_puppetdb.py 
b/cumin/tests/unit/backends/test_puppetdb.py
index 37195dd..6bd65fd 100644
--- a/cumin/tests/unit/backends/test_puppetdb.py
+++ b/cumin/tests/unit/backends/test_puppetdb.py
@@ -1,4 +1,4 @@
-"""PuppetDB backend tests"""
+"""PuppetDB backend tests."""
 import unittest
 
 import requests_mock
@@ -9,32 +9,32 @@
 
 
 class TestPuppetDBQueryClass(unittest.TestCase):
-    """PuppetDB backend query_class test class"""
+    """PuppetDB backend query_class test class."""
 
     def test_query_class(self):
-        """An instance of query_class should be an instance of BaseQuery"""
+        """An instance of query_class should be an instance of BaseQuery."""
         query = puppetdb.query_class({})
         self.assertIsInstance(query, BaseQuery)
 
 
 class TestPuppetDBQuery(unittest.TestCase):
-    """PuppetDB backend query test class"""
+    """PuppetDB backend query test class."""
 
     def setUp(self):
-        """Setup an instace of PuppetDBQuery for each test"""
+        """Setup an instace of PuppetDBQuery for each test."""
         self.query = puppetdb.PuppetDBQuery({})
 
     def test_instantiation(self):
-        """An instance of PuppetDBQuery should be an instance of BaseQuery"""
+        """An instance of PuppetDBQuery should be an instance of BaseQuery."""
         self.assertIsInstance(self.query, BaseQuery)
         self.assertEqual(self.query.url, 'https://localhost:443/v3/')
 
     def test_category_getter(self):
-        """Access to category property should return facts by default"""
+        """Access to category property should return facts by default."""
         self.assertEqual(self.query.category, 'F')
 
     def test_category_setter(self):
-        """Setting category property should accept only valid values, raise 
RuntimeError otherwise"""
+        """Setting category property should accept only valid values, raise 
RuntimeError otherwise."""
         self.query.category = 'F'
         self.assertEqual(self.query.category, 'F')
 
@@ -54,7 +54,7 @@
             query.category = 'F'
 
     def test_add_category_fact(self):
-        """Calling add_category() with a fact should add the proper query 
token to the object"""
+        """Calling add_category() with a fact should add the proper query 
token to the object."""
         self.assertListEqual(self.query.current_group['tokens'], [])
         # Base fact query
         self.query.add_category('F', 'key', 'value')
@@ -73,48 +73,48 @@
         self.assertListEqual(self.query.current_group['tokens'], [r'["~", 
["fact", "key"], "value\\\\escaped"]'])
 
     def test_add_category_resource_base(self):
-        """Calling add_category() with a base resource query should add the 
proper query token to the object"""
+        """Calling add_category() with a base resource query should add the 
proper query token to the object."""
         self.assertListEqual(self.query.current_group['tokens'], [])
         self.query.add_category('R', 'key', 'value')
         self.assertListEqual(self.query.current_group['tokens'],
                              ['["and", ["=", "type", "key"], ["=", "title", 
"value"]]'])
 
     def test_add_category_resource_neg(self):
-        """Calling add_category() with a negated resource query should add the 
proper query token to the object"""
+        """Calling add_category() with a negated resource query should add the 
proper query token to the object."""
         self.query.add_category('R', 'key', 'value', neg=True)
         self.assertListEqual(self.query.current_group['tokens'],
                              ['["not", ["and", ["=", "type", "key"], ["=", 
"title", "value"]]]'])
 
     def test_add_category_resource_regex(self):
-        """Calling add_category() with a regex resource query should add the 
proper query token to the object"""
+        """Calling add_category() with a regex resource query should add the 
proper query token to the object."""
         self.query.add_category('R', 'key', r'value\\escaped', operator='~')
         self.assertListEqual(self.query.current_group['tokens'],
                              [r'["and", ["=", "type", "key"], ["~", "title", 
"value\\\\escaped"]]'])
 
     def test_add_category_resource_parameter(self):
-        """Calling add_category() with a resource's parameter query should add 
the proper query token to the object"""
+        """Calling add_category() with a resource's parameter query should add 
the proper query token to the object."""
         self.query.add_category('R', 'resource%param', 'value')
         self.assertListEqual(self.query.current_group['tokens'],
                              ['["and", ["=", "type", "resource"], ["=", 
["parameter", "param"], "value"]]'])
 
     def test_add_category_resource_field(self):
-        """Calling add_category() with a resource's field query should add the 
proper query token to the object"""
+        """Calling add_category() with a resource's field query should add the 
proper query token to the object."""
         self.query.add_category('R', 'resource@field', 'value')
         self.assertListEqual(self.query.current_group['tokens'],
                              ['["and", ["=", "type", "resource"], ["=", 
"field", "value"]]'])
 
     def test_add_category_resource_title(self):
-        """Calling add_category() with a resource's title query should add the 
proper query token to the object"""
+        """Calling add_category() with a resource's title query should add the 
proper query token to the object."""
         self.query.add_category('R', 'resource')
         self.assertListEqual(self.query.current_group['tokens'], ['["and", 
["=", "type", "resource"]]'])
 
     def test_add_category_resource_parameter_field(self):
-        """Calling add_category() with both a parameter and a field should 
raise RuntimeError"""
+        """Calling add_category() with both a parameter and a field should 
raise RuntimeError."""
         with self.assertRaisesRegexp(RuntimeError, 'Resource key cannot 
contain both'):
             self.query.add_category('R', 'resource@field%param')
 
     def test_add_hosts(self):
-        """Calling add_hosts() with a resource should add the proper query 
token to the object"""
+        """Calling add_hosts() with a resource should add the proper query 
token to the object."""
         self.assertListEqual(self.query.current_group['tokens'], [])
         # No hosts
         self.query.add_hosts([])
@@ -138,7 +138,7 @@
         self.assertListEqual(self.query.current_group['tokens'], [r'["or", 
["~", "{host_key}", "^host1.*\\.domain$"]]'])
 
     def test_open_subgroup(self):
-        """Calling open_subgroup() should open a subgroup and relate it to 
it's parent"""
+        """Calling open_subgroup() should open a subgroup and relate it to 
it's parent."""
         parent = {'parent': None, 'bool': None, 'tokens': []}
         child = {'parent': parent, 'bool': None, 'tokens': ['["or", ["=", 
"{host_key}", "host"]]']}
         parent['tokens'].append(child)
@@ -148,7 +148,7 @@
         self.assertIsNotNone(self.query.current_group['parent'])
 
     def test_close_subgroup(self):
-        """Calling close_subgroup() should close a subgroup and return to the 
parent's context"""
+        """Calling close_subgroup() should close a subgroup and return to the 
parent's context."""
         self.query.open_subgroup()
         self.query.close_subgroup()
         self.assertEqual(len(self.query.current_group['tokens']), 1)
@@ -156,19 +156,19 @@
         self.assertIsNone(self.query.current_group['parent'])
 
     def test_add_and(self):
-        """Calling add_and() should set the boolean property to the current 
group to 'and'"""
+        """Calling add_and() should set the boolean property to the current 
group to 'and'."""
         self.assertIsNone(self.query.current_group['bool'])
         self.query.add_and()
         self.assertEqual(self.query.current_group['bool'], 'and')
 
     def test_add_or(self):
-        """Calling add_or() should set the boolean property to the current 
group to 'or'"""
+        """Calling add_or() should set the boolean property to the current 
group to 'or'."""
         self.assertIsNone(self.query.current_group['bool'])
         self.query.add_or()
         self.assertEqual(self.query.current_group['bool'], 'or')
 
     def test_add_and_or(self):
-        """Calling add_or() and add_and() in the same group should raise 
InvalidQueryError"""
+        """Calling add_or() and add_and() in the same group should raise 
InvalidQueryError."""
         self.query.add_hosts(['host1'])
         self.query.add_or()
         self.query.add_hosts(['host2'])
@@ -179,14 +179,14 @@
 
 @requests_mock.Mocker()
 class TestPuppetDBQueryExecute(unittest.TestCase):
-    """PuppetDBQuery test execute() method class"""
+    """PuppetDBQuery test execute() method class."""
 
     def setUp(self):
-        """Setup an instace of PuppetDBQuery for each test"""
+        """Setup an instace of PuppetDBQuery for each test."""
         self.query = puppetdb.PuppetDBQuery({})
 
     def _register_uris(self, mocked_requests):
-        """Setup the requests library mock for each test"""
+        """Setup the requests library mock for each test."""
         # Register a mocked_requests valid response for each endpoint
         for category in self.query.endpoints.keys():
             endpoint = self.query.endpoints[category]
@@ -202,7 +202,7 @@
                                      status_code=400, complete_qs=True)
 
     def test_nodes_endpoint(self, mocked_requests):
-        """Calling execute() with a query that goes to the nodes endpoint 
should return the list of hosts"""
+        """Calling execute() with a query that goes to the nodes endpoint 
should return the list of hosts."""
         self._register_uris(mocked_requests)
         self.query.add_hosts(['nodes_host1', 'nodes_host2'])
         hosts = self.query.execute()
@@ -210,7 +210,7 @@
         self.assertEqual(mocked_requests.call_count, 1)
 
     def test_resources_endpoint(self, mocked_requests):
-        """Calling execute() with a query that goes to the resources endpoint 
should return the list of hosts"""
+        """Calling execute() with a query that goes to the resources endpoint 
should return the list of hosts."""
         self._register_uris(mocked_requests)
         self.query.add_category('R', 'Class', 'value')
         hosts = self.query.execute()
@@ -218,7 +218,7 @@
         self.assertEqual(mocked_requests.call_count, 1)
 
     def test_with_boolean_operator(self, mocked_requests):
-        """Calling execute() with a query with a boolean operator should 
return the list of hosts"""
+        """Calling execute() with a query with a boolean operator should 
return the list of hosts."""
         self._register_uris(mocked_requests)
         self.query.add_hosts(['nodes_host1'])
         self.query.add_or()
@@ -228,7 +228,7 @@
         self.assertEqual(mocked_requests.call_count, 1)
 
     def test_with_subgroup(self, mocked_requests):
-        """Calling execute() with a query with a subgroup return the list of 
hosts"""
+        """Calling execute() with a query with a subgroup return the list of 
hosts."""
         self._register_uris(mocked_requests)
         self.query.open_subgroup()
         self.query.add_hosts(['nodes_host1'])
@@ -240,14 +240,14 @@
         self.assertEqual(mocked_requests.call_count, 1)
 
     def test_empty(self, mocked_requests):
-        """Calling execute() with a query that return no hosts should return 
an empty list"""
+        """Calling execute() with a query that return no hosts should return 
an empty list."""
         self._register_uris(mocked_requests)
         hosts = self.query.execute()
         self.assertEqual(hosts, set())
         self.assertEqual(mocked_requests.call_count, 1)
 
     def test_error(self, mocked_requests):
-        """Calling execute() if the request fails it should raise the requests 
exception"""
+        """Calling execute() if the request fails it should raise the requests 
exception."""
         self._register_uris(mocked_requests)
         self.query.current_group['tokens'].append('invalid_query')
         with self.assertRaises(HTTPError):
@@ -255,7 +255,7 @@
             self.assertEqual(mocked_requests.call_count, 1)
 
     def test_complex_query(self, mocked_requests):
-        """Calling execute() with a complex query should return the exptected 
structure"""
+        """Calling execute() with a complex query should return the exptected 
structure."""
         category = 'R'
         endpoint = self.query.endpoints[category]
         key = self.query.hosts_keys[category]
diff --git a/cumin/tests/unit/test_backends.py 
b/cumin/tests/unit/test_backends.py
index 5e050e5..44aaa64 100644
--- a/cumin/tests/unit/test_backends.py
+++ b/cumin/tests/unit/test_backends.py
@@ -1,13 +1,13 @@
-"""Abstract query tests"""
+"""Abstract query tests."""
 import unittest
 
 from cumin.backends import BaseQuery
 
 
 class TestBaseQuery(unittest.TestCase):
-    """BaseQuery class tests"""
+    """Class BaseQuery tests."""
 
     def test_instantiation(self):
-        """BaseQuery is not instantiable being an abstract class"""
+        """Class BaseQuery is not instantiable being an abstract class."""
         with self.assertRaises(TypeError):
             BaseQuery({})
diff --git a/cumin/tests/unit/test_cli.py b/cumin/tests/unit/test_cli.py
index 87c1349..0d9a938 100644
--- a/cumin/tests/unit/test_cli.py
+++ b/cumin/tests/unit/test_cli.py
@@ -1,4 +1,4 @@
-"""CLI tests"""
+"""CLI tests."""
 import unittest
 
 from logging import DEBUG, INFO
@@ -14,17 +14,17 @@
 
 
 class TestCLI(unittest.TestCase):
-    """CLI module tests"""
+    """CLI module tests."""
 
     def _validate_parsed_args(self, args):
-        """Validate that the parsed args have the proper values"""
+        """Validate that the parsed args have the proper values."""
         self.assertTrue(args.debug)
         self.assertEqual(args.config, 'doc/examples/config.yaml')
         self.assertEqual(args.hosts, 'host')
         self.assertEqual(args.commands, ['command1', 'command2'])
 
     def test_parse_args(self):
-        """A standard set of command line parameters should be properly parsed 
into their respective variables"""
+        """A standard set of command line parameters should be properly parsed 
into their respective variables."""
         args = cli.parse_args(argv=_ARGV)
         self._validate_parsed_args(args)
 
@@ -33,7 +33,7 @@
             self._validate_parsed_args(args)
 
     def test_get_running_user(self):
-        """Unsufficient permissions or unknown user should raise RuntimeError 
and a proper user should be detected"""
+        """Unsufficient permissions or unknown user should raise RuntimeError 
and a proper user should be detected."""
         env = {'USER': None, 'SUDO_USER': None}
         with mock.patch('os.getenv', env.get):
             with self.assertRaisesRegexp(RuntimeError, r'Unsufficient 
privileges, run with sudo'):
@@ -51,7 +51,7 @@
     @mock.patch('cumin.cli.RotatingFileHandler')
     @mock.patch('cumin.cli.logger')
     def test_setup_logging(self, mocked_logging, mocked_file_handler, 
mocked_os):
-        """Calling setup_logging() should properly setup the logger"""
+        """Calling setup_logging() should properly setup the logger."""
         mocked_os.path.exists.return_value = False
         cli.setup_logging('user', '/path/to/filename')
         mocked_logging.setLevel.assert_called_with(INFO)
@@ -61,7 +61,7 @@
         mocked_logging.setLevel.assert_called_with(DEBUG)
 
     def test_parse_config(self):
-        """The configuration file is properly parsed and accessible"""
+        """The configuration file is properly parsed and accessible."""
         config = cli.parse_config('doc/examples/config.yaml')
         self.assertTrue('log_file' in config)
 
@@ -70,7 +70,7 @@
     @mock.patch('cumin.cli.sys.stdout.isatty')
     @mock.patch('cumin.cli.logger')
     def test_sigint_handler(self, mocked_logging, mocked_isatty, 
mocked_raw_input, mocked_stderr):
-        """Calling the SIGINT handler should raise KeyboardInterrupt or not 
based on tty and answer"""
+        """Calling the SIGINT handler should raise KeyboardInterrupt or not 
based on tty and answer."""
         # Signal handler called without a tty
         mocked_isatty.return_value = False
         with self.assertRaises(cli.KeyboardInterruptError):
@@ -101,6 +101,6 @@
 
     @mock.patch('cumin.cli.tqdm')
     def test_stderr(self, mocked_tqdm):
-        """Calling stderr() should call tqdm.write()"""
+        """Calling stderr() should call tqdm.write()."""
         cli.stderr('message')
         self.assertTrue(mocked_tqdm.write.called)
diff --git a/cumin/tests/unit/test_grammar.py b/cumin/tests/unit/test_grammar.py
index 7ddea30..85e1db1 100644
--- a/cumin/tests/unit/test_grammar.py
+++ b/cumin/tests/unit/test_grammar.py
@@ -1,4 +1,4 @@
-"""Grammar tests"""
+"""Grammar tests."""
 import unittest
 
 from cumin.grammar import grammar
@@ -6,32 +6,32 @@
 
 
 class TestGrammar(unittest.TestCase):
-    """Grammar class tests"""
+    """Grammar class tests."""
 
     def _get_category_key_token(self, category='F', key='key1', operator='=', 
value='value1'):
-        """Generate and return a category token string and it's expected 
dictionary of tokens when parsed"""
+        """Generate and return a category token string and it's expected 
dictionary of tokens when parsed."""
         expected = {'category': category, 'key': key, 'operator': operator, 
'value': value}
         token = '{category}:{key} {operator} {value}'.format(**expected)
         return token, expected
 
     def test_valid_strings(self):
-        """Run quick pyparsing test over valid grammar strings"""
+        """Run quick pyparsing test over valid grammar strings."""
         results = grammar.runTests(get_fixture('valid_grammars.txt', 
as_string=True))
         self.assertTrue(results[0])
 
     def test_invalid_strings(self):
-        """Run quick pyparsing test over invalid grammar strings"""
+        """Run quick pyparsing test over invalid grammar strings."""
         results = grammar.runTests(get_fixture('invalid_grammars.txt', 
as_string=True), failureTests=True)
         self.assertTrue(results[0])
 
     def test_single_category_key_token(self):
-        """A valid single token with a category that has key is properly 
parsed and interpreted"""
+        """A valid single token with a category that has key is properly 
parsed and interpreted."""
         token, expected = self._get_category_key_token()
         parsed = grammar.parseString(token, parseAll=True)
         self.assertDictEqual(parsed[0].asDict(), expected)
 
     def test_hosts_selection(self):
-        """A host selection is properly parsed and interpreted"""
+        """A host selection is properly parsed and interpreted."""
         hosts = {'hosts': 'host[10-20,30-40].domain'}
         parsed = grammar.parseString(hosts['hosts'], parseAll=True)
         self.assertDictEqual(parsed[0].asDict(), hosts)
diff --git a/cumin/tests/unit/test_query.py b/cumin/tests/unit/test_query.py
index 4667a77..a6b91dc 100644
--- a/cumin/tests/unit/test_query.py
+++ b/cumin/tests/unit/test_query.py
@@ -1,4 +1,4 @@
-"""Query handling tests"""
+"""Query handling tests."""
 import os
 import pkgutil
 import unittest
@@ -13,26 +13,26 @@
 
 
 class QueryFactory(object):
-    """Query factory class"""
+    """Query factory class."""
 
     @staticmethod
     def new(config, logger=None):
-        """Return an instance of the mocked query class"""
+        """Return an instance of the mocked query class."""
         if not isinstance(config, dict):
             raise AssertionError("Expected instance of dict, got type '{type}' 
for config.".format(type=type(config)))
         return mock.MagicMock(spec_set=BaseQuery)
 
 
 class TestQuery(unittest.TestCase):
-    """Query factory class tests"""
+    """Query factory class tests."""
 
     def test_invalid_backend(self):
-        """Passing an invalid backend should raise RuntimeError"""
+        """Passing an invalid backend should raise RuntimeError."""
         with self.assertRaisesRegexp(RuntimeError, r"ImportError\('No module 
named non_existent_backend'"):
             Query.new({'backend': 'non_existent_backend'})
 
     def test_missing_query_class(self):
-        """Passing a backend without a defined query_class should raise 
RuntimeError"""
+        """Passing a backend without a defined query_class should raise 
RuntimeError."""
         module = mock.MagicMock()
         del module.query_class
         with mock.patch('importlib.import_module', lambda _: module):
@@ -40,14 +40,14 @@
                 Query.new({'backend': 'invalid_backend'})
 
     def test_valid_backend(self):
-        """Passing a valid backend should return an instance of BaseQuery"""
+        """Passing a valid backend should return an instance of BaseQuery."""
         backends = [name for _, name, _ in 
pkgutil.iter_modules([os.path.join('cumin', 'backends')])]
         for backend in backends:
             self.assertIsInstance(Query.new({'backend': backend}), BaseQuery)
 
 
 class TestQueryBuilder(unittest.TestCase):
-    """QueryBuilder class tests"""
+    """Class QueryBuilder tests."""
 
     query_string = 'host1 or (not F:key1 = value and R:key2 ~ regex) or host2'
     invalid_query_string = 'host1 and or not F:key1 value'
@@ -55,7 +55,7 @@
 
     @mock.patch('cumin.query.Query', QueryFactory)
     def test_instantiation(self):
-        """QueryBuilder should create an instance of a query_class for the 
given backend"""
+        """Class QueryBuilder should create an instance of a query_class for 
the given backend."""
         query_builder = QueryBuilder(self.query_string, self.config)
         self.assertIsInstance(query_builder, QueryBuilder)
         self.assertIsInstance(query_builder.query, BaseQuery)
@@ -64,8 +64,7 @@
 
     @mock.patch('cumin.query.Query', QueryFactory)
     def test_build_valid(self):
-        """QueryBuilder.build() should parse and build the query object for a 
valid query"""
-
+        """QueryBuilder.build() should parse and build the query object for a 
valid query."""
         query_builder = QueryBuilder(self.query_string, self.config)
         query_builder.build()
 
@@ -81,24 +80,21 @@
 
     @mock.patch('cumin.query.Query', QueryFactory)
     def test_build_glob_host(self):
-        """QueryBuilder.build() should parse a glob host"""
-
+        """QueryBuilder.build() should parse a glob host."""
         query_builder = QueryBuilder('host1*', self.config)
         query_builder.build()
         
query_builder.query.add_hosts.assert_called_once_with(hosts=NodeSet.fromlist(['host1*']))
 
     @mock.patch('cumin.query.Query', QueryFactory)
     def test_build_invalid(self):
-        """QueryBuilder.build() should raise ParseException for an invalid 
query"""
-
+        """QueryBuilder.build() should raise ParseException for an invalid 
query."""
         query_builder = QueryBuilder(self.invalid_query_string, self.config)
         with self.assertRaisesRegexp(ParseException, r"Expected end of text"):
             query_builder.build()
 
     @mock.patch('cumin.query.Query', QueryFactory)
     def test__parse_token(self):
-        """QueryBuilder._parse_token() should raise RuntimeError for an 
invalid token"""
-
+        """QueryBuilder._parse_token() should raise RuntimeError for an 
invalid token."""
         query_builder = QueryBuilder(self.invalid_query_string, self.config)
         with self.assertRaisesRegexp(RuntimeError, r"Invalid query string 
syntax"):
             query_builder._parse_token('invalid_token')
diff --git a/cumin/tests/unit/test_transport.py 
b/cumin/tests/unit/test_transport.py
index 8fae76e..1aa7bd4 100644
--- a/cumin/tests/unit/test_transport.py
+++ b/cumin/tests/unit/test_transport.py
@@ -1,4 +1,4 @@
-"""Transport class tests"""
+"""Transport class tests."""
 
 import os
 import pkgutil
@@ -11,15 +11,15 @@
 
 
 class TestTransport(unittest.TestCase):
-    """Transport factory class tests"""
+    """Transport factory class tests."""
 
     def test_invalid_transport(self):
-        """Passing an invalid transport should raise RuntimeError"""
+        """Passing an invalid transport should raise RuntimeError."""
         with self.assertRaisesRegexp(RuntimeError, r"ImportError\('No module 
named non_existent_transport'"):
             Transport.new({'transport': 'non_existent_transport'})
 
     def test_missing_worker_class(self):
-        """Passing a transport without a defined worker_class should raise 
RuntimeError"""
+        """Passing a transport without a defined worker_class should raise 
RuntimeError."""
         module = mock.MagicMock()
         del module.worker_class
         with mock.patch('importlib.import_module', lambda _: module):
@@ -27,7 +27,7 @@
                 Transport.new({'transport': 'invalid_transport'})
 
     def test_valid_transport(self):
-        """Passing a valid transport should return an instance of BaseWorker"""
+        """Passing a valid transport should return an instance of 
BaseWorker."""
         transports = [name for _, name, _ in 
pkgutil.iter_modules([os.path.join('cumin', 'transports')])]
         for transport in transports:
             self.assertIsInstance(Transport.new({'transport': transport}), 
BaseWorker)
diff --git a/cumin/tests/unit/test_transports.py 
b/cumin/tests/unit/test_transports.py
index 1024c1e..93fbd36 100644
--- a/cumin/tests/unit/test_transports.py
+++ b/cumin/tests/unit/test_transports.py
@@ -1,4 +1,4 @@
-"""Abstract worker tests"""
+"""Abstract worker tests."""
 
 import os
 import unittest
@@ -9,20 +9,20 @@
 
 
 class ConcreteBaseWorker(BaseWorker):
-    """Extend the BaseWorker"""
+    """Extend the BaseWorker."""
 
     def execute(self, hosts, commands, mode=None, handler=None, timeout=0, 
success_threshold=1):
-        """Required by BaseWorker"""
+        """Required by BaseWorker."""
 
     def get_results(self):
-        """Required by BaseWorker"""
+        """Required by BaseWorker."""
 
 
 class TestBaseWorker(unittest.TestCase):
-    """BaseWorker class tests"""
+    """Class BaseWorker tests."""
 
     def test_instantiation(self):
-        """BaseWorker rase it instantiated directly, should return an instance 
of BaseWorker if inherited"""
+        """Class BaseWorker rase it instantiated directly, should return an 
instance of BaseWorker if inherited."""
         with self.assertRaises(TypeError):
             BaseWorker({})
 
@@ -30,7 +30,7 @@
 
     @mock.patch.dict(os.environ, {}, clear=True)
     def test_init(self):
-        """Constructor should save config and set environment variables"""
+        """Constructor should save config and set environment variables."""
         env_dict = {'ENV_VARIABLE': 'env_value'}
         config = {'transport': 'test_transport',
                   'test_transport': {'environment': env_dict}}
diff --git a/cumin/tests/unit/transports/__init__.py 
b/cumin/tests/unit/transports/__init__.py
index e69de29..238fdb0 100644
--- a/cumin/tests/unit/transports/__init__.py
+++ b/cumin/tests/unit/transports/__init__.py
@@ -0,0 +1 @@
+"""Transport specific tests"""
diff --git a/cumin/tests/unit/transports/test_clustershell.py 
b/cumin/tests/unit/transports/test_clustershell.py
index 4949724..3936482 100644
--- a/cumin/tests/unit/transports/test_clustershell.py
+++ b/cumin/tests/unit/transports/test_clustershell.py
@@ -1,4 +1,4 @@
-"""ClusterShell transport tests"""
+"""ClusterShell transport tests."""
 import unittest
 
 import mock
@@ -7,24 +7,24 @@
 
 
 class TestWorkerClass(unittest.TestCase):
-    """ClusterShell backend worker_class test class"""
+    """ClusterShell backend worker_class test class."""
 
     @mock.patch('cumin.transports.clustershell.task_self')
     def test_worker_class(self, task_self):
-        """An instance of worker_class should be an instance of BaseWorker"""
+        """An instance of worker_class should be an instance of BaseWorker."""
         worker = clustershell.worker_class({})
         self.assertIsInstance(worker, BaseWorker)
         task_self.assert_called_once_with()
 
 
 class TestClusterShellWorker(unittest.TestCase):
-    """ClusterShell backend worker test class"""
+    """ClusterShell backend worker test class."""
 
     @mock.patch('cumin.transports.clustershell.SyncEventHandler', 
autospec=True)
     @mock.patch('cumin.transports.clustershell.AsyncEventHandler', 
autospec=True)
     @mock.patch('cumin.transports.clustershell.task_self')
     def setUp(self, task_self, async_event_handler, sync_event_handler):
-        """Initialize default properties and instances"""
+        """Initialize default properties and instances."""
         self.worker = clustershell.worker_class({})
         self.nodes = ['node1', 'node2']
         self.nodes_set = clustershell.NodeSet.fromlist(self.nodes)
@@ -35,7 +35,7 @@
 
     @mock.patch('cumin.transports.clustershell.task_self')
     def test_instantiation(self, task_self):
-        """An instance of ClusterShellWorker should be an instance of 
BaseWorker and initialize ClusterShell"""
+        """An instance of ClusterShellWorker should be an instance of 
BaseWorker and initialize ClusterShell."""
         worker = clustershell.ClusterShellWorker({'clustershell': 
{'ssh_options': ['option1', 'option2']}})
         self.assertIsInstance(worker, BaseWorker)
         task_self.assert_called_once_with()
@@ -43,56 +43,56 @@
             [mock.call('ssh_options', 'option1'), mock.call('ssh_options', 
'option2')])
 
     def test_execute_default_sync_handler(self):
-        """Calling execute() in sync mode without event handler should use the 
default sync event handler"""
+        """Calling execute() in sync mode without event handler should use the 
default sync event handler."""
         self.worker.execute(self.nodes, self.commands, 'sync', handler=True)
         self.worker.task.shell.assert_called_once_with(
             'command1', nodes=self.nodes_set, 
handler=self.sync_event_handler(self.nodes, self.commands))
 
     def test_execute_default_async_handler(self):
-        """Calling execute() in async mode without event handler should use 
the default async event handler"""
+        """Calling execute() in async mode without event handler should use 
the default async event handler."""
         self.worker.execute(self.nodes, self.commands, 'async', handler=True)
         self.worker.task.shell.assert_called_once_with(
             'command1', nodes=self.nodes_set, 
handler=self.async_event_handler(self.nodes, self.commands))
 
     def test_execute_timeout(self):
-        """Calling execute() and let the timeout expire should be handled by 
the default event handler"""
+        """Calling execute() and let the timeout expire should be handled by 
the default event handler."""
         self.worker.task.run = 
mock.Mock(side_effect=clustershell.ClusterShell.Task.TimeoutError)
         self.worker.execute(self.nodes, self.commands, 'sync', handler=True)
         # no exception raised
 
     def test_execute_no_hanlder(self):
-        """Calling execute() should call ClusterShell task without event 
handler"""
+        """Calling execute() should call ClusterShell task without event 
handler."""
         self.worker.execute(self.nodes, self.commands, 'async')
         self.worker.task.shell.assert_called_once_with('command1', 
nodes=self.nodes_set, handler=None)
 
     def test_execute_custom_handler(self):
-        """Calling execute() using a custom handler should call ClusterShell 
task with the custom event handler"""
+        """Calling execute() using a custom handler should call ClusterShell 
task with the custom event handler."""
         event_handler = mock.Mock(spec_set=clustershell.BaseEventHandler)
         self.worker.execute(self.nodes, self.commands, 'sync', 
handler=event_handler)
         self.worker.task.shell.assert_called_once_with('command1', 
nodes=self.nodes_set, handler=event_handler())
 
     def test_execute_no_commands(self):
-        """Calling execute() without commands should return without doing 
anything"""
+        """Calling execute() without commands should return without doing 
anything."""
         self.worker.execute(self.nodes, [], 'sync')
         self.assertFalse(self.worker.task.shell.called)
 
     def test_execute_one_command_no_mode(self):
-        """Calling execute() with only one command without mode should work 
without raising exceptions"""
+        """Calling execute() with only one command without mode should work 
without raising exceptions."""
         self.worker.execute(self.nodes, [self.commands[0]])
         self.worker.task.shell.assert_called_once_with('command1', 
nodes=self.nodes_set, handler=None)
 
     def test_execute_one_command_with_mode(self):
-        """Calling execute() with only one command with mode should work as if 
it was not specified"""
+        """Calling execute() with only one command with mode should work as if 
it was not specified."""
         self.worker.execute(self.nodes, [self.commands[0]], 'async')
         self.worker.task.shell.assert_called_once_with('command1', 
nodes=self.nodes_set, handler=None)
 
     def test_execute_wrong_mode(self):
-        """Calling execute() with the wrong mode should raise RuntimeError"""
+        """Calling execute() with the wrong mode should raise RuntimeError."""
         with self.assertRaisesRegexp(RuntimeError, 'Unknown mode'):
             self.worker.execute(self.nodes, self.commands, 'invalid_mode')
 
     def test_get_results(self):
-        """Calling get_results() should call ClusterShell iter_buffers with 
the right parameters"""
+        """Calling get_results() should call ClusterShell iter_buffers with 
the right parameters."""
         self.worker.task.iter_buffers = TestClusterShellWorker.iter_buffers
         self.worker.execute(self.nodes, self.commands, 'async')
         for nodes, output in self.worker.get_results():
@@ -102,15 +102,16 @@
 
     @staticmethod
     def iter_buffers():
-        """A generator to simulate the buffer iteration of ClusterShell 
objects"""
+        """A generator to simulate the buffer iteration of ClusterShell 
objects."""
         for i in xrange(10):
             yield 'output {}'.format(i), ['node{}0'.format(i), 
'node{}1'.format(i), 'node{}2'.format(i)]
 
 
 class TestBaseEventHandler(unittest.TestCase):
-    """BaseEventHandler test class"""
+    """BaseEventHandler test class."""
+
     def setUp(self, *args):
-        """Initialize default properties and instances"""
+        """Initialize default properties and instances."""
         self.nodes = ['node1', 'node2']
         self.commands = ['command1', 'command2']
         self.worker = mock.MagicMock()
@@ -120,7 +121,7 @@
 
     @mock.patch('cumin.transports.clustershell.colorama')
     def test_close(self, colorama):
-        """Calling close should raise NotImplementedError"""
+        """Calling close should raise NotImplementedError."""
         self.handler = clustershell.BaseEventHandler(self.nodes, self.commands)
         with self.assertRaises(NotImplementedError):
             self.handler.close(self.worker)
@@ -128,52 +129,52 @@
 
 
 class ConcreteBaseEventHandler(clustershell.BaseEventHandler):
-    """Concrete implementation of a BaseEventHandler"""
+    """Concrete implementation of a BaseEventHandler."""
 
     def __init__(self, nodes, commands, **kwargs):
-        """Initialize progress bars"""
+        """Initialize progress bars."""
         super(ConcreteBaseEventHandler, self).__init__(nodes, commands, 
**kwargs)
         self.pbar_ok = mock.Mock()
         self.pbar_ko = mock.Mock()
 
     def close(self, worker):
-        """Required by the BaseEventHandler class"""
+        """Required by the BaseEventHandler class."""
 
 
 class TestConcreteBaseEventHandler(TestBaseEventHandler):
-    """ConcreteBaseEventHandler test class"""
+    """ConcreteBaseEventHandler test class."""
 
     @mock.patch('cumin.transports.clustershell.colorama')
     def setUp(self, colorama):
-        """Initialize default properties and instances"""
+        """Initialize default properties and instances."""
         super(TestConcreteBaseEventHandler, self).setUp()
         self.handler = ConcreteBaseEventHandler(self.nodes, self.commands)
         self.worker.eh = self.handler
         self.colorama = colorama
 
     def test_instantiation(self):
-        """An instance of ConcreteBaseEventHandler should be an instance of 
BaseEventHandler and initialize colorama"""
+        """An instance of ConcreteBaseEventHandler should be an instance of 
BaseEventHandler and initialize colorama."""
         self.assertListEqual(self.handler.nodes, self.nodes)
         self.colorama.init.assert_called_once_with()
 
     def test_ev_error(self):
-        """Calling ev_error should update the fail progress bar"""
+        """Calling ev_error should update the fail progress bar."""
         self.handler.ev_error(self.worker)
         self.handler.pbar_ko.update.assert_called_once_with()
 
     def test_ev_timeout(self):
-        """Calling test_ev_timeout should update the fail progress bar"""
+        """Calling test_ev_timeout should update the fail progress bar."""
         self.handler.ev_timeout(self.worker)
         self.assertTrue(self.handler.pbar_ko.update.called)
 
 
 class TestSyncEventHandler(TestBaseEventHandler):
-    """SyncEventHandler test class"""
+    """SyncEventHandler test class."""
 
     @mock.patch('cumin.transports.clustershell.colorama')
     @mock.patch('cumin.transports.clustershell.tqdm')
     def setUp(self, tqdm, colorama):
-        """Initialize default properties and instances"""
+        """Initialize default properties and instances."""
         super(TestSyncEventHandler, self).setUp(tqdm, colorama)
         self.handler = clustershell.SyncEventHandler(self.nodes, self.commands)
         self.worker.eh = self.handler
@@ -182,12 +183,12 @@
             self.handler.ev_start(self.worker)
 
     def test_instantiation(self):
-        """An instance of SyncEventHandler should be an instance of 
BaseEventHandler and initialize nodes_commands"""
+        """An instance of SyncEventHandler should be an instance of 
BaseEventHandler and initialize nodes_commands."""
         self.assertIsInstance(self.handler, clustershell.BaseEventHandler)
         self.assertListEqual(self.handler.nodes_commands, self.commands)
 
     def test_ev_start(self):
-        """Calling ev_start should initialize tqdm and refresh it"""
+        """Calling ev_start should initialize tqdm and refresh it."""
         self.assertTrue(self.handler.pbar_ok.refresh.called)
 
         # Running it again should fail
@@ -195,19 +196,19 @@
             self.handler.ev_start(self.worker)
 
     def test_ev_hup_ok(self):
-        """Calling ev_hup with a worker that has exit status zero should 
update the success progress bar"""
+        """Calling ev_hup with a worker that has exit status zero should 
update the success progress bar."""
         self.worker.current_rc = 0
         self.handler.ev_hup(self.worker)
         self.assertTrue(self.handler.pbar_ok.update.called)
 
     def test_ev_hup_ko(self):
-        """Calling ev_hup with a worker that has exit status non-zero should 
update the failed progress bar"""
+        """Calling ev_hup with a worker that has exit status non-zero should 
update the failed progress bar."""
         self.worker.current_rc = 1
         self.handler.ev_hup(self.worker)
         self.assertTrue(self.handler.pbar_ko.update.called)
 
     def test_ev_close(self):
-        """Calling close should close progress bars"""
+        """Calling close should close progress bars."""
         self.worker.task.iter_buffers = TestClusterShellWorker.iter_buffers
         self.worker.num_timeout.return_value = 0
         self.handler.ev_close(self.worker)
@@ -215,23 +216,23 @@
 
 
 class TestAsyncEventHandler(TestBaseEventHandler):
-    """AsyncEventHandler test class"""
+    """AsyncEventHandler test class."""
 
     @mock.patch('cumin.transports.clustershell.colorama')
     @mock.patch('cumin.transports.clustershell.tqdm')
     def setUp(self, tqdm, colorama):
-        """Initialize default properties and instances"""
+        """Initialize default properties and instances."""
         super(TestAsyncEventHandler, self).setUp()
         self.handler = clustershell.AsyncEventHandler(self.nodes, 
self.commands)
         self.worker.eh = self.handler
 
     def test_instantiation(self):
-        """An instance of AsyncEventHandler should be an instance of 
BaseEventHandler and initialize progress bars"""
+        """An instance of AsyncEventHandler should be an instance of 
BaseEventHandler and initialize progress bars."""
         self.assertIsInstance(self.handler, clustershell.BaseEventHandler)
         self.assertTrue(self.handler.pbar_ok.refresh.called)
 
     def test_ev_pickup(self):
-        """Calling ev_pickup should not raise exception the first time, raise 
the second one"""
+        """Calling ev_pickup should not raise exception the first time, raise 
the second one."""
         self.handler.ev_start(self.worker)
         self.handler.ev_pickup(self.worker)
         # no exception raised
@@ -240,7 +241,7 @@
             self.handler.ev_pickup(self.worker)
 
     def test_ev_hup_ok(self):
-        """Calling ev_hup with a worker that has zero exit status should 
update enqueue the next command"""
+        """Calling ev_hup with a worker that has zero exit status should 
update enqueue the next command."""
         self.handler.ev_start(self.worker)
         self.handler.ev_pickup(self.worker)
         self.worker.current_rc = 0
@@ -256,7 +257,7 @@
         self.assertTrue(self.handler.pbar_ok.update.called)
 
     def test_ev_hup_ko(self):
-        """Calling ev_hup with a worker that has non-zero exit status should 
not enqueue the next command"""
+        """Calling ev_hup with a worker that has non-zero exit status should 
not enqueue the next command."""
         self.handler.ev_start(self.worker)
         self.handler.ev_pickup(self.worker)
         self.worker.current_rc = 1
@@ -266,7 +267,7 @@
 
     @mock.patch('cumin.transports.clustershell.colorama')
     def test_close(self, colorama):
-        """Calling close with a worker should close progress bars"""
+        """Calling close with a worker should close progress bars."""
         self.handler.ev_start(self.worker)
         self.worker.task.iter_buffers = TestClusterShellWorker.iter_buffers
         self.worker.num_timeout.return_value = 0
diff --git a/cumin/transport.py b/cumin/transport.py
index 10e9117..9ad8e66 100644
--- a/cumin/transport.py
+++ b/cumin/transport.py
@@ -1,18 +1,18 @@
-"""Transport factory"""
+"""Transport factory."""
 
 import importlib
 
 
 class Transport(object):
-    """Transport factory class"""
+    """Transport factory class."""
 
     @staticmethod
     def new(config, logger=None):
-        """ Return an instance of the worker class for the configured transport
+        """Return an instance of the worker class for the configured transport.
 
-            Arguments:
-            config - the configuration dictionary
-            logger - an optional logging instance [optional, default: None]
+        Arguments:
+        config - the configuration dictionary
+        logger - an optional logging instance [optional, default: None]
         """
         try:
             module = 
importlib.import_module('cumin.transports.{transport}'.format(transport=config['transport']))
diff --git a/cumin/transports/__init__.py b/cumin/transports/__init__.py
index 50ffde9..b9ed6bc 100644
--- a/cumin/transports/__init__.py
+++ b/cumin/transports/__init__.py
@@ -1,4 +1,4 @@
-"""Abstract transport"""
+"""Abstract transport."""
 
 import logging
 import os
@@ -7,16 +7,16 @@
 
 
 class BaseWorker(object):
-    """Worker interface"""
+    """Worker interface."""
 
     __metaclass__ = ABCMeta
 
     def __init__(self, config, logger=None):
-        """ Worker constructor. Setup environment variables.
+        """Worker constructor. Setup environment variables.
 
-            Arguments:
-            config -- a dictionary with the parsed configuration file
-            logger -- an optional logger instance [optional, default: None]
+        Arguments:
+        config -- a dictionary with the parsed configuration file
+        logger -- an optional logger instance [optional, default: None]
         """
         self.config = config
         self.logger = logger or logging.getLogger(__name__)
@@ -27,24 +27,24 @@
 
     @abstractmethod
     def execute(self, hosts, commands, mode=None, handler=None, timeout=0, 
success_threshold=1):
-        """ Execute the given commands on the given hosts
+        """Execute the given commands on the given hosts.
 
-            Arguments:
-            hosts             -- a list of hosts to target for the execution 
of the commands
-            commands          -- a list of commands to be executed on the hosts
-            mode              -- the mode of operation, needed only when more 
than one command is specified. It depends
-                                 on the actual transport chosen. Typical 
values are: sync, async.
-                                 [optional, default: None]
-            handler           -- an event handler to be notified of the 
progress during execution. Its interface
-                                 depends on the actual transport chosen. 
Accepted values are: None => don't use an
-                                 event handler (default), True => use the 
transport's default event hander, an event
-                                 handler class. [optional, default: None]
-            timeout           -- the timeout in seconds for the whole 
execution. [optional, default: 0 (unlimited)]
-            success_threshold -- The success ratio threshold that must be 
reached to consider the run successful. A
-                                 float between 0 and 1. The specific meaning 
might change based on the chosen transport.
-                                 [optional, default: 1]
+        Arguments:
+        hosts             -- a list of hosts to target for the execution of 
the commands
+        commands          -- a list of commands to be executed on the hosts
+        mode              -- the mode of operation, needed only when more than 
one command is specified. It depends
+                             on the actual transport chosen. Typical values 
are: sync, async.
+                             [optional, default: None]
+        handler           -- an event handler to be notified of the progress 
during execution. Its interface
+                             depends on the actual transport chosen. Accepted 
values are: None => don't use an
+                             event handler (default), True => use the 
transport's default event handler, an event
+                             handler class. [optional, default: None]
+        timeout           -- the timeout in seconds for the whole execution. 
[optional, default: 0 (unlimited)]
+        success_threshold -- The success ratio threshold that must be reached 
to consider the run successful. A
+                             float between 0 and 1. The specific meaning might 
change based on the chosen transport.
+                             [optional, default: 1]
         """
 
     @abstractmethod
     def get_results(self):
-        """Generator that yields the results of the current execution"""
+        """Generator that yields the results of the current execution."""
diff --git a/cumin/transports/clustershell.py b/cumin/transports/clustershell.py
index d4e6523..b5827f3 100644
--- a/cumin/transports/clustershell.py
+++ b/cumin/transports/clustershell.py
@@ -1,4 +1,4 @@
-"""ClusterShell transport: worker and event handlers"""
+"""Transport ClusterShell: worker and event handlers."""
 
 from collections import defaultdict
 
@@ -12,12 +12,12 @@
 
 
 class ClusterShellWorker(BaseWorker):
-    """ClusterShell worker, extends BaseWorker"""
+    """Worker ClusterShell, extends BaseWorker."""
 
     def __init__(self, config, logger=None):
-        """ ClusterShell worker constructor
+        """Worker ClusterShell constructor.
 
-            Arguments: according to BaseQuery interface
+        Arguments: according to BaseQuery interface
         """
         super(ClusterShellWorker, self).__init__(config, logger)
         self.task = task_self()  # Initialize a ClusterShell task
@@ -30,7 +30,7 @@
             self.task.set_info('ssh_options', option)
 
     def execute(self, hosts, commands, mode=None, handler=None, timeout=0, 
success_threshold=1):
-        """Required by BaseWorker"""
+        """Required by BaseWorker."""
         if len(commands) == 0:
             self.logger.warning('No commands provided')
             return
@@ -62,26 +62,26 @@
                 handler.close(self.task)
 
     def get_results(self):
-        """Required by BaseWorker"""
+        """Required by BaseWorker."""
         for output, nodelist in self.task.iter_buffers():
             yield NodeSet.fromlist(nodelist), output
 
 
 class BaseEventHandler(ClusterShell.Event.EventHandler):
-    """ClusterShell event handler extension base class"""
+    """ClusterShell event handler extension base class."""
 
     short_command_length = 35
 
     def __init__(self, nodes, commands, **kwargs):
-        """ ClusterShell event handler extension constructor
+        """Event handler ClusterShell extension constructor.
 
-            If inherited classes defines a self.pbar_ko tqdm progress bar, it 
will be updated on ev_error and
-            ev_timeout events.
+        If inherited classes define a self.pbar_ko tqdm progress bar, it will 
be updated on ev_error and ev_timeout
+        events.
 
-            Arguments:
-            nodes    -- the list of nodes with which this worker was 
initiliazed
-            commands -- the list of commands that has to be executed on the 
nodes
-            **kwargs -- optional additional keyword arguments that might be 
used by classes that extend this base class
+        Arguments:
+        nodes    -- the list of nodes with which this worker was initialized
+        commands -- the list of commands that has to be executed on the nodes
+        **kwargs -- optional additional keyword arguments that might be used 
by classes that extend this base class
         """
         super(BaseEventHandler, self).__init__()
         self.nodes = nodes
@@ -99,17 +99,17 @@
                            '[{elapsed}<{remaining}, {rate_fmt}]') + 
colorama.Style.RESET_ALL
 
     def close(self, task):
-        """ Additional method called at the end of the execution, useful for 
reporting and final actions
+        """Additional method called at the end of the execution, useful for 
reporting and final actions.
 
-            Arguments:
-            task -- a ClusterShell Task instance
+        Arguments:
+        task -- a ClusterShell Task instance
         """
         raise NotImplementedError
 
     def ev_error(self, worker):
-        """ Update the current fail progress bar and print the error
+        """Update the current fail progress bar and print the error.
 
-            Arguments: according to EventHandler interface
+        Arguments: according to EventHandler interface
         """
         self.failed_commands[worker.command].append(worker.current_node)
         if self.pbar_ko is not None:
@@ -117,22 +117,22 @@
         tqdm.write(worker.current_errmsg)
 
     def ev_timeout(self, worker):
-        """ Update the current fail progress bar
+        """Update the current fail progress bar.
 
-            Arguments: according to EventHandler interface
+        Arguments: according to EventHandler interface
         """
         if self.pbar_ko is not None:
             self.pbar_ko.update(worker.num_timeout())
 
     def _print_report_line(self, num, tot, message, color=colorama.Fore.RED, 
nodes=None):
-        """ Helper to print a tqdm-friendly colored status line with 
success/failure ratio and optional list of nodes
+        """Helper to print a tqdm-friendly colored status line with 
success/failure ratio and optional list of nodes.
 
-            Arguments:
-            num     - the number of affecte nodes
-            tot     - the total number of nodes
-            message - the message to print
-            color   - the colorama color to use for the line [optional, 
default: colorama.Fore.RED]
-            nodes   - the list of nodes affected [optional, default: None]
+        Arguments:
+        num     - the number of affected nodes
+        tot     - the total number of nodes
+        message - the message to print
+        color   - the colorama color to use for the line [optional, default: 
colorama.Fore.RED]
+        nodes   - the list of nodes affected [optional, default: None]
         """
         if nodes is None:
             nodes = ''
@@ -146,20 +146,20 @@
             nodes_color=colorama.Fore.CYAN, nodes=nodes, 
reset=colorama.Style.RESET_ALL))
 
     def _get_short_command(self, command):
-        """ Return a shortened representation of a command omitting the 
central part
+        """Return a shortened representation of a command omitting the central 
part.
 
-            Arguments:
-            command - the command to be shortened
+        Arguments:
+        command - the command to be shortened
         """
         sublen = (self.short_command_length - 3) // 2  # The -3 is for the 
ellipsis
         return (command[:sublen] + '...' + command[-sublen:]) if len(command) 
> self.short_command_length else command
 
     def _commands_output_report(self, buffer_iterator, command=None):
-        """ Helper to print the commands output in a colored and tqdm-friendly 
way
+        """Helper to print the commands output in a colored and tqdm-friendly 
way.
 
-            Arguments:
-            buffer_iterator - any ClusterShell object that implements 
iter_buffers() like Task and Worker objects.
-            command         - command the output is referring to [optional, 
default: None]
+        Arguments:
+        buffer_iterator - any ClusterShell object that implements 
iter_buffers() like Task and Worker objects.
+        command         - command the output is referring to [optional, 
default: None]
         """
         nodelist = None
         if command is not None:
@@ -183,10 +183,10 @@
         tqdm.write(colorama.Fore.BLUE + message + colorama.Style.RESET_ALL)
 
     def _timeout_nodes_report(self, buffer_iterator):
-        """ Helper to print the nodes that timed out in a colored and 
tqdm-friendly way
+        """Helper to print the nodes that timed out in a colored and 
tqdm-friendly way.
 
-            Arguments:
-            buffer_iterator - any ClusterShell object that implements 
iter_buffers() like Task and Worker objects.
+        Arguments:
+        buffer_iterator - any ClusterShell object that implements 
iter_buffers() like Task and Worker objects.
         """
         timeout = buffer_iterator.num_timeout()
         if timeout == 0:
@@ -196,10 +196,10 @@
         self._print_report_line(timeout, tot, 'of nodes timed out', 
nodes=buffer_iterator.iter_keys_timeout())
 
     def _failed_commands_report(self, filter_command=None):
-        """ Helper to print the nodes that failed to execute commands in a 
colored and tqdm-friendly way
+        """Helper to print the nodes that failed to execute commands in a 
colored and tqdm-friendly way.
 
-            Arguments:
-            filter_command - print only the nodes that failed to execute this 
specific command [optional, default: None]
+        Arguments:
+        filter_command - print only the nodes that failed to execute this 
specific command [optional, default: None]
         """
         tot = len(self.nodes)
         for command, nodes in self.failed_commands.iteritems():
@@ -214,7 +214,7 @@
             self._print_report_line(fail, tot, message, nodes=nodes)
 
     def _success_nodes_report(self):
-        """Helper to print how many nodes succesfully executed all commands in 
a colored and tqdm-friendly way"""
+        """Helper to print how many nodes succesfully executed all commands in 
a colored and tqdm-friendly way."""
         tot = len(self.nodes)
         succ = len(self.success_nodes)
         message = 'of nodes succesfully executed all commands'
@@ -234,32 +234,32 @@
 
 
 class SyncEventHandler(BaseEventHandler):
-    """ Custom ClusterShell event handler class that execute commands 
synchronously
+    """Custom ClusterShell event handler class that execute commands 
synchronously.
 
-        The implemented logic is:
-        - execute command_N on all nodes where command_N-1 was successful (all 
nodes at first iteration)
-        - if success ratio of the execution of command_N < success threshold, 
then:
-          - abort the execution
-        - else:
-          - re-start from the top with N=N+1
+    The implemented logic is:
+    - execute command_N on all nodes where command_N-1 was successful (all 
nodes at first iteration)
+    - if success ratio of the execution of command_N < success threshold, then:
+      - abort the execution
+    - else:
+      - re-start from the top with N=N+1
 
-        The typical use case is to orchestrate some operation across a fleet, 
ensuring that each command is completed
-        by enough hosts before proceeding with the next one.
+    The typical use case is to orchestrate some operation across a fleet, 
ensuring that each command is completed by
+    enough hosts before proceeding with the next one.
     """
 
     def __init__(self, nodes, commands, **kwargs):
-        """ Custom ClusterShell synchronous event handler constructor
+        """Custom ClusterShell synchronous event handler constructor.
 
-            Arguments: according to BaseEventHandler interface
+        Arguments: according to BaseEventHandler interface
         """
         super(SyncEventHandler, self).__init__(nodes, commands, **kwargs)
         # Slicing the commands list to get a copy
         self.nodes_commands = commands[:]
 
     def ev_start(self, worker):
-        """ Worker started, initialize progress bars and variables for this 
command execution
+        """Worker started, initialize progress bars and variables for this 
command execution.
 
-            Arguments: according to EventHandler interface
+        Arguments: according to EventHandler interface
         """
         command = self.nodes_commands.pop(0)
         self.success_nodes = []
@@ -276,11 +276,11 @@
         self.pbar_ko.refresh()
 
     def ev_hup(self, worker):
-        """ Command execution completed
+        """Command execution completed.
 
-            Update the progress bars and keep track of nodes based on the 
success/failure of the command's execution
+        Update the progress bars and keep track of nodes based on the 
success/failure of the command's execution
 
-            Arguments: according to EventHandler interface
+        Arguments: according to EventHandler interface
         """
         if worker.current_rc != 0:
             self.pbar_ko.update()
@@ -290,9 +290,9 @@
             self.success_nodes.append(worker.current_node)
 
     def ev_close(self, worker):
-        """ Worker terminated, print the output of the command execution and 
the summary report lines
+        """Worker terminated, print the output of the command execution and 
the summary report lines.
 
-            Arguments: according to EventHandler interface
+        Arguments: according to EventHandler interface
         """
         self._commands_output_report(worker, command=worker.command)
 
@@ -327,28 +327,28 @@
         self._print_report_line(succ, tot, message, color=color)
 
     def close(self, task):
-        """ Print a final summary report line
+        """Print a final summary report line.
 
-            Arguments: according to BaseEventHandler interface
+        Arguments: according to BaseEventHandler interface
         """
         if len(self.commands) > 1 and len(self.nodes_commands) == 0:
             self._success_nodes_report()
 
 
 class AsyncEventHandler(BaseEventHandler):
-    """ Custom ClusterShell event handler class that execute commands 
asynchronously
+    """Custom ClusterShell event handler class that execute commands 
asynchronously.
 
-        The implemented logic is to execute on all nodes, independently one to 
each other, command_N only if
-        command_N-1 was succesful, aborting the execution on that node 
otherwise.
+    The implemented logic is to execute on all nodes, independently one to 
each other, command_N only if command_N-1
+    was successful, aborting the execution on that node otherwise.
 
-        The typical use case is to execute read-only commands to gather the 
status of a fleet without any special need
-        of orchestration between the hosts.
+    The typical use case is to execute read-only commands to gather the status 
of a fleet without any special need of
+    orchestration between the hosts.
     """
 
     def __init__(self, nodes, commands, **kwargs):
-        """ Custom ClusterShell asynchronous event handler constructor
+        """Custom ClusterShell asynchronous event handler constructor.
 
-            Arguments: according to BaseEventHandler interface
+        Arguments: according to BaseEventHandler interface
         """
         super(AsyncEventHandler, self).__init__(nodes, commands, **kwargs)
         # Map commands to all nodes .Slicing the commands list to get a copy
@@ -364,21 +364,21 @@
         self.pbar_ko.refresh()
 
     def ev_pickup(self, worker):
-        """ Command execution started, remove the command from the node's queue
+        """Command execution started, remove the command from the node's queue.
 
-            Arguments: according to EventHandler interface
+        Arguments: according to EventHandler interface
         """
         command = self.nodes_commands[worker.current_node].pop(0)
         if command != worker.command:
             raise RuntimeError('{} != {}'.format(command, worker.command))
 
     def ev_hup(self, worker):
-        """ Command execution completed
+        """Command execution completed.
 
-            Enqueue the next command if the previous was successful, track the 
failure otherwise
-            Update the progress bars accordingly
+        Enqueue the next command if the previous was successful, track the 
failure otherwise
+        Update the progress bars accordingly
 
-            Arguments: according to EventHandler interface
+        Arguments: according to EventHandler interface
         """
         if worker.current_rc != 0:
             self.pbar_ko.update()
@@ -394,9 +394,9 @@
             self.success_nodes.append(worker.current_node)
 
     def close(self, task):
-        """ Properly close all progress bars and print results
+        """Properly close all progress bars and print results.
 
-            Arguments: according to BaseEventHandler interface
+        Arguments: according to BaseEventHandler interface
         """
         self._commands_output_report(task)
 

-- 
To view, visit https://gerrit.wikimedia.org/r/339834
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia0ae7dd9ffa08e7bf3ed7b40b75176400249a959
Gerrit-PatchSet: 1
Gerrit-Project: operations/software/cumin
Gerrit-Branch: master
Gerrit-Owner: Volans <rcocci...@wikimedia.org>

_______________________________________________
MediaWiki-commits mailing list
MediaWiki-commits@lists.wikimedia.org
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to