Volans has uploaded a new change for review. (
https://gerrit.wikimedia.org/r/386400 )
Change subject: Logging: use % syntax for parameters
......................................................................
Logging: use % syntax for parameters
* For optimization purposes and to adhere to Python best practices, use
%s syntax in logging messages and pass the replacement parameters to
the logging function. Some messages are still pre-formatted before the
call to the logging function because they are also used for other purposes.
* pylint: re-enable the check for logging-format-interpolation.
Bug: T179002
Change-Id: I3ed78ec8e9c6e502c0f8a3b10ea5420e36953051
---
M cumin/backends/__init__.py
M cumin/backends/direct.py
M cumin/backends/openstack.py
M cumin/backends/puppetdb.py
M cumin/cli.py
M cumin/query.py
M cumin/transports/__init__.py
M cumin/transports/clustershell.py
M prospector.yaml
9 files changed, 37 insertions(+), 51 deletions(-)
git pull ssh://gerrit.wikimedia.org:29418/operations/software/cumin
refs/changes/00/386400/1
diff --git a/cumin/backends/__init__.py b/cumin/backends/__init__.py
index 9961f1c..a3f8fde 100644
--- a/cumin/backends/__init__.py
+++ b/cumin/backends/__init__.py
@@ -35,8 +35,7 @@
"""
self.config = config
self.logger = logging.getLogger('.'.join((self.__module__,
self.__class__.__name__)))
- self.logger.trace('Backend {name} created with config:
{config}'.format(
- name=type(self).__name__, config=config))
+ self.logger.trace('Backend %s created with config: %s',
type(self).__name__, config)
def execute(self, query_string):
"""Build and execute the query, return the NodeSet of FQDN hostnames
that matches.
@@ -74,9 +73,9 @@
Arguments:
query_string (str): the query string to be parsed.
"""
- self.logger.trace('Parsing query: {query}'.format(query=query_string))
+ self.logger.trace('Parsing query: %s', query_string)
parsed = self.grammar.parseString(query_string.strip(), parseAll=True)
- self.logger.trace('Parsed query: {parsed}'.format(parsed=parsed))
+ self.logger.trace('Parsed query: %s', parsed)
for token in parsed:
self._parse_token(token)
@@ -111,7 +110,7 @@
self.stack = self._get_stack_element()
self.stack_pointer = self.stack
super(BaseQueryAggregator, self)._build(query_string)
- self.logger.trace('Query stack: {stack}'.format(stack=self.stack))
+ self.logger.trace('Query stack: %s', self.stack)
def _execute(self):
"""Concrete implementation of parent abstract method.
@@ -121,7 +120,7 @@
"""
hosts = NodeSet()
self._loop_stack(hosts, self.stack) # The hosts nodeset is updated in
place while looping the stack
- self.logger.debug('Found {num} hosts'.format(num=len(hosts)))
+ self.logger.debug('Found %d hosts', len(hosts))
return hosts
@@ -182,8 +181,7 @@
bool_operator (str, None): the boolean operator to apply while
aggregating the two NodeSet. It must be
:py:data:`None` when adding the first hosts.
"""
- self.logger.trace("Aggregating: {hosts} | {boolean} |
{element_hosts}".format(
- hosts=hosts, boolean=bool_operator, element_hosts=element_hosts))
+ self.logger.trace("Aggregating: %s | %s | %s", hosts, bool_operator,
element_hosts)
# This should never happen
if (bool_operator is None and hosts) or (bool_operator is not None and
not hosts): # pragma: no cover
diff --git a/cumin/backends/direct.py b/cumin/backends/direct.py
index 0a9f6e8..5b4cc9e 100644
--- a/cumin/backends/direct.py
+++ b/cumin/backends/direct.py
@@ -74,7 +74,7 @@
type=type(token), token=token))
token_dict = token.asDict()
- self.logger.trace('Token is: {token_dict} |
{token}'.format(token_dict=token_dict, token=token))
+ self.logger.trace('Token is: %s | %s', token_dict, token)
if 'hosts' in token_dict:
element = self._get_stack_element()
diff --git a/cumin/backends/openstack.py b/cumin/backends/openstack.py
index 56e0235..3fb0b8f 100644
--- a/cumin/backends/openstack.py
+++ b/cumin/backends/openstack.py
@@ -172,7 +172,7 @@
type=type(token), token=token))
token_dict = token.asDict()
- self.logger.trace('Token is: {token_dict} |
{token}'.format(token_dict=token_dict, token=token))
+ self.logger.trace('Token is: %s | %s', token_dict, token)
if 'key' in token_dict and 'value' in token_dict:
if token_dict['key'] == 'project':
diff --git a/cumin/backends/puppetdb.py b/cumin/backends/puppetdb.py
index 1e76a9b..c6676af 100644
--- a/cumin/backends/puppetdb.py
+++ b/cumin/backends/puppetdb.py
@@ -217,7 +217,7 @@
self.grouped_tokens = PuppetDBQuery._get_grouped_tokens()
self.current_group = self.grouped_tokens
super(PuppetDBQuery, self)._build(query_string)
- self.logger.trace('Query tokens:
{tokens}'.format(tokens=self.grouped_tokens))
+ self.logger.trace('Query tokens: %s', self.grouped_tokens)
def _execute(self):
"""Concrete implementation of parent abstract method.
@@ -232,8 +232,7 @@
query =
self._get_query_string(group=self.grouped_tokens).format(host_key=self.hosts_keys[self.endpoint])
hosts = self._api_call(query)
unique_hosts = NodeSet.fromlist([host[self.hosts_keys[self.endpoint]]
for host in hosts])
- self.logger.debug("Queried puppetdb for '{query}', got '{num}'
results.".format(
- query=query, num=len(unique_hosts)))
+ self.logger.debug("Queried puppetdb for '%s', got '%d' results.",
query, len(unique_hosts))
return unique_hosts
diff --git a/cumin/cli.py b/cumin/cli.py
index cb75b5d..f2ac2d5 100644
--- a/cumin/cli.py
+++ b/cumin/cli.py
@@ -241,7 +241,7 @@
# stderr(message)
# break
# else:
- # logger.warning('Execution interrupted by Ctrl+c/SIGINT: got invalid
answer for {i} times'.format(i=i))
+ # logger.warning('Execution interrupted by Ctrl+c/SIGINT: got invalid
answer for %d times', i)
# raise KeyboardInterruptError
@@ -414,7 +414,7 @@
if args.transport is not None:
config['transport'] = args.transport
- logger.info("Cumin called by user '{user}' with args:
{args}".format(user=user, args=args))
+ logger.info("Cumin called by user '%s' with args: %s", user, args)
# Execution
try:
diff --git a/cumin/query.py b/cumin/query.py
index 8f005e0..87ca4d3 100644
--- a/cumin/query.py
+++ b/cumin/query.py
@@ -108,7 +108,7 @@
type=type(token), token=token))
token_dict = token.asDict()
- self.logger.trace('Token is: {token}'.format(token=token_dict))
+ self.logger.trace('Token is: %s', token_dict)
if self._replace_alias(token_dict):
return # This token was an alias and got replaced
diff --git a/cumin/transports/__init__.py b/cumin/transports/__init__.py
index d046cc2..cf922c4 100644
--- a/cumin/transports/__init__.py
+++ b/cumin/transports/__init__.py
@@ -383,8 +383,8 @@
if batch_size is None:
batch_size = hosts_size
elif batch_size > hosts_size:
- self.logger.debug(("Provided batch_size '{batch_size}' is greater
than the number of hosts '{hosts_size}'"
- ", using '{hosts_size}' as
value").format(batch_size=batch_size, hosts_size=hosts_size))
+ self.logger.debug(("Provided batch_size '%d' is greater than the
number of hosts '%d'"
+ ", using '%d' as value"), batch_size,
hosts_size, hosts_size)
batch_size = hosts_size
return batch_size
@@ -420,8 +420,7 @@
self.config = config
self.target = target
self.logger = logging.getLogger('.'.join((self.__module__,
self.__class__.__name__)))
- self.logger.trace('Transport {name} created with config:
{config}'.format(
- name=type(self).__name__, config=config))
+ self.logger.trace('Transport %s created with config: %s',
type(self).__name__, config)
# Initialize setters values
self._commands = None
diff --git a/cumin/transports/clustershell.py b/cumin/transports/clustershell.py
index 36f5c8f..50cc224 100644
--- a/cumin/transports/clustershell.py
+++ b/cumin/transports/clustershell.py
@@ -57,8 +57,8 @@
self._handler_instance = self.handler( # pylint: disable=not-callable
self.target, self.commands,
success_threshold=self.success_threshold)
- self.logger.info("Executing commands {commands} on '{num}' hosts:
{hosts}".format(
- commands=self.commands, num=len(self.target.hosts),
hosts=self.target.hosts))
+ self.logger.info(
+ "Executing commands %s on '%d' hosts: %s", self.commands,
len(self.target.hosts), self.target.hosts)
self.task.shell(self.commands[0].command,
nodes=self.target.first_batch, handler=self._handler_instance,
timeout=self.commands[0].timeout)
@@ -205,8 +205,7 @@
task (ClusterShell.Task.Task): a ClusterShell Task instance.
"""
num_timeout = task.num_timeout()
- self.logger.error('global timeout was triggered while {num} nodes were
executing a command'.format(
- num=num_timeout))
+ self.logger.error('Global timeout was triggered while %d nodes were
executing a command', num_timeout)
self.lock.acquire() # Avoid modifications of the same data from other
callbacks triggered by ClusterShell
try:
@@ -233,8 +232,7 @@
:Parameters:
according to parent
:py:meth:`ClusterShell.Event.EventHandler.ev_pickup`.
"""
- self.logger.debug("node={node}, command='{command}'".format(
- node=worker.current_node, command=worker.command))
+ self.logger.debug("node=%s, command='%s'", worker.current_node,
worker.command)
self.lock.acquire() # Avoid modifications of the same data from other
callbacks triggered by ClusterShell
try:
@@ -276,8 +274,7 @@
according to parent
:py:meth:`ClusterShell.Event.EventHandler.ev_timeout`.
"""
delta_timeout = worker.task.num_timeout() - self.counters['timeout']
- self.logger.debug("command='{command}', delta_timeout={num}".format(
- command=worker.command, num=delta_timeout))
+ self.logger.debug("command='%s', delta_timeout=%d", worker.command,
delta_timeout)
self.lock.acquire() # Avoid modifications of the same data from other
callbacks triggered by ClusterShell
try:
@@ -383,13 +380,13 @@
timeout = [node.name for node in self.nodes.itervalues() if
node.state.is_timeout]
timeout_desc = 'of nodes were executing a command when the global
timeout occurred'
timeout_message, timeout_nodes = self._get_log_message(len(timeout),
timeout_desc, nodes=timeout)
- self.logger.error('{message}{nodes}'.format(message=timeout_message,
nodes=timeout_nodes))
+ self.logger.error('%s%s', timeout_message, timeout_nodes)
self._print_report_line(timeout_message, nodes_string=timeout_nodes)
not_run = [node.name for node in self.nodes.itervalues() if
node.state.is_pending or node.state.is_scheduled]
not_run_desc = 'of nodes were pending execution when the global
timeout occurred'
not_run_message, not_run_nodes = self._get_log_message(len(not_run),
not_run_desc, nodes=not_run)
- self.logger.error('{message}{nodes}'.format(message=not_run_message,
nodes=not_run_nodes))
+ self.logger.error('%s%s', not_run_message, not_run_nodes)
self._print_report_line(not_run_message, nodes_string=not_run_nodes)
def _failed_commands_report(self, filter_command_index=-1):
@@ -413,7 +410,7 @@
message = "of nodes {state} to execute command
'{command}'".format(
state=State.states_representation[state],
command=self._get_short_command(command))
log_message, nodes_string = self._get_log_message(len(nodes),
message, nodes=nodes)
-
self.logger.error('{message}{nodes}'.format(message=log_message,
nodes=nodes_string))
+ self.logger.error('%s%s', log_message, nodes_string)
self._print_report_line(log_message, nodes_string=nodes_string)
def _success_nodes_report(self, command=None):
@@ -449,18 +446,18 @@
message = "success ratio ({comp} {perc:.1%}
threshold){message_string}{post}".format(
comp=comp, perc=self.success_threshold,
message_string=message_string, post=post)
log_message, nodes_string = self._get_log_message(num, message,
nodes=nodes)
- final_message = '{message}{nodes}'.format(message=log_message,
nodes=nodes_string)
if num == tot:
color = colorama.Fore.GREEN
- self.logger.info(final_message)
+ level = logging.INFO
elif success_ratio >= self.success_threshold:
color = colorama.Fore.YELLOW
- self.logger.warning(final_message)
+ level = logging.WARNING
else:
color = colorama.Fore.RED
- self.logger.error(final_message)
+ level = logging.ERROR
+ self.logger.log(level, '%s%s', log_message, nodes_string)
self._print_report_line(log_message, color=color,
nodes_string=nodes_string)
@@ -523,8 +520,8 @@
self.lock.release()
command = self.commands[self.current_command_index]
- self.logger.debug("command='{command}', timeout={timeout},
first_batch={first_batch}".format(
- command=command.command, timeout=command.timeout,
first_batch=first_batch_set))
+ self.logger.debug(
+ "command='%s', timeout=%d, first_batch=%s", command.command,
command.timeout, first_batch_set)
# Schedule the command for execution in ClusterShell
Task.task_self().flush_buffers()
@@ -585,8 +582,7 @@
:Parameters:
according to parent
:py:meth:`ClusterShell.Event.EventHandler.ev_hup`.
"""
- self.logger.debug("node={node}, rc={rc}, command='{command}'".format(
- node=worker.current_node, rc=worker.current_rc,
command=worker.command))
+ self.logger.debug("node=%s, rc=%d, command='%s'", worker.current_node,
worker.current_rc, worker.command)
self.lock.acquire() # Avoid modifications of the same data from other
callbacks triggered by ClusterShell
try:
@@ -636,8 +632,7 @@
if node is not None:
# Schedule the execution with ClusterShell of the current command
to the next node found above
command =
self.nodes[node.name].commands[self.nodes[node.name].running_command_index + 1]
- self.logger.debug("next_node={node}, timeout={timeout},
command='{command}'".format(
- node=node.name, command=command.command,
timeout=command.timeout))
+ self.logger.debug("next_node=%s, timeout=%d, command='%s'",
node.name, command.timeout, command.command)
Task.task_self().shell(
command.command, nodes=NodeSet.NodeSet(node.name),
handler=timer.eh, timeout=command.timeout)
return
@@ -658,15 +653,14 @@
# Avoid race conditions
if self.aborted or accounted != self.counters['total'] or command
is None or self.global_timedout:
- self.logger.debug("skipped timer")
+ self.logger.debug("Skipped timer")
return
if pending:
# This usually happens when executing in batches
- self.logger.warning("command '{command}' was not executed on:
{nodes}".format(
- command=command, nodes=NodeSet.NodeSet.fromlist(pending)))
+ self.logger.warning("Command '%s' was not executed on: %s",
command, NodeSet.NodeSet.fromlist(pending))
- self.logger.info("completed command
'{command}'".format(command=command))
+ self.logger.info("Completed command '%s'", command)
restart = self.end_command()
self.current_command_index += 1 # Move the global pointer of the
command in execution
@@ -732,8 +726,7 @@
:Parameters:
according to parent
:py:meth:`ClusterShell.Event.EventHandler.ev_hup`.
"""
- self.logger.debug("node={node}, rc={rc}, command='{command}'".format(
- node=worker.current_node, rc=worker.current_rc,
command=worker.command))
+ self.logger.debug("node=%s, rc=%d, command='%s'", worker.current_node,
worker.current_rc, worker.command)
schedule_next = False
schedule_timer = False
@@ -794,8 +787,7 @@
if node is not None:
# Schedule the exeuction of the first command to the next node
with ClusterShell
command = node.commands[0]
- self.logger.debug("next_node={node}, timeout={timeout},
command='{command}'".format(
- node=node.name, command=command.command,
timeout=command.timeout))
+ self.logger.debug("next_node=%s, timeout=%d, command='%s'",
node.name, command.timeout, command.command)
Task.task_self().shell(
command.command, nodes=NodeSet.NodeSet(node.name),
handler=timer.eh, timeout=command.timeout)
else:
diff --git a/prospector.yaml b/prospector.yaml
index d0b8c5e..8ceff7f 100644
--- a/prospector.yaml
+++ b/prospector.yaml
@@ -24,8 +24,6 @@
- D407 # Missing dashed underline after section, incompatible with Google
Style Python Docstrings
pylint:
- disable:
- - logging-format-interpolation # format() is used for coherence across
the project
options:
ignore: vulture_whitelist.py
max-line-length: 120
--
To view, visit https://gerrit.wikimedia.org/r/386400
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I3ed78ec8e9c6e502c0f8a3b10ea5420e36953051
Gerrit-PatchSet: 1
Gerrit-Project: operations/software/cumin
Gerrit-Branch: master
Gerrit-Owner: Volans <[email protected]>
_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits