Awight has uploaded a new change for review.

  https://gerrit.wikimedia.org/r/314485

Change subject: Mindlessly fix flake8 things
......................................................................

Mindlessly fix flake8 things

Change-Id: I615fcfd186bd27286da7132d6f2e598dba046145
---
M FundraiserStatisticsGen/countrygen.py
M FundraiserStatisticsGen/fundstatgen.py
M FundraiserStatisticsGen/mediumgen.py
M FundraiserStatisticsGen/methodgen.py
M FundraiserStatisticsGen/sizes_by_day.py
M GeonameUpdater/geonames_updater.py
M SquidRipper/aggByTime.py
M SquidRipper/logparser.py
M SquidRipper/squidder.py
M audit/globalcollect/history.py
M audit/paypal/SarFile.py
M audit/paypal/TrrFile.py
M audit/paypal/history.py
M audit/paypal/paypal_api.py
M audit/paypal/ppreport.py
M audit/paypal/unicode_csv_reader.py
M civicrm/civicrm.py
M civicrm/tag.py
M database/db.py
M failmail/mailer.py
M fundraising_ab_tests/campaign_log.py
M fundraising_ab_tests/confidence.py
M fundraising_ab_tests/fundraising_experiment.py
M fundraising_ab_tests/results.py
M fundraising_ab_tests/results_gdoc.py
M fundraising_ab_tests/spec.py
M fundraising_ab_tests/spec_gdoc.py
M google/gdocs.py
M live_analysis/dump_tests.py
M mediawiki/api.py
M mediawiki/centralnotice/api.py
M mediawiki/centralnotice/contributions.py
M mediawiki/centralnotice/impressions.py
M mediawiki/centralnotice/time_util.py
M mediawiki/i18n.py
M mediawiki/page.py
M process/globals.py
M process/lock.py
M process/logging.py
M process/version_stamp.py
M queue/redis_wrap.py
M queue/tests/test_redis_wrap.py
M queue/tests/test_stomp_wrap.py
M sftp/client.py
M silverpop_export/export.py
M silverpop_export/update.py
M tox.ini
M whitelist/netdiff.py
48 files changed, 211 insertions(+), 142 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/wikimedia/fundraising/tools refs/changes/85/314485/1

diff --git a/FundraiserStatisticsGen/countrygen.py b/FundraiserStatisticsGen/countrygen.py
index ab66121..c446885 100644
--- a/FundraiserStatisticsGen/countrygen.py
+++ b/FundraiserStatisticsGen/countrygen.py
@@ -5,6 +5,7 @@
 from optparse import OptionParser
 from ConfigParser import SafeConfigParser
 
+
 def main():
     # Extract any command line options
     parser = OptionParser(usage="usage: %prog [options] <working directory>")
@@ -122,7 +123,7 @@
     return data
 
 
-def createSingleOutFile(stats, firstcols, filename, colnames = None):
+def createSingleOutFile(stats, firstcols, filename, colnames=None):
     """
     Creates a single report file from a keyed dict
 
diff --git a/FundraiserStatisticsGen/fundstatgen.py b/FundraiserStatisticsGen/fundstatgen.py
index 21ed704..acb5ae1 100644
--- a/FundraiserStatisticsGen/fundstatgen.py
+++ b/FundraiserStatisticsGen/fundstatgen.py
@@ -16,6 +16,7 @@
 
 CONFIG_DEFAULT_PATH = "/etc/fundraising/fundstatgen.cfg"
 
+
 def main():
     # Extract any command line options
     parser = OptionParser(usage="usage: %prog [options] <working directory>")
@@ -215,7 +216,7 @@
     return years, pivot
 
 
-def createOutputFiles(stats, firstcol, basename, colnames = None):
+def createOutputFiles(stats, firstcol, basename, colnames=None):
     """
     Creates a CSV file for each report in stats
     """
@@ -224,7 +225,7 @@
         createSingleOutFile(stats[report], firstcol, basename + report, colnames)
 
 
-def createSingleOutFile(stats, firstcols, basefilename, colnames = None):
+def createSingleOutFile(stats, firstcols, basefilename, colnames=None):
     """
     Creates a single report file from a keyed dict
 
@@ -264,7 +265,7 @@
 
     jsonfilename = basefilename + ".json"
     f = file(jsonfilename, 'w')
-    mapstats = [ dict(zip(firstcols + colnames, line)) for line in alldata ]
+    mapstats = [dict(zip(firstcols + colnames, line)) for line in alldata]
     json.dump(mapstats, f)
     f.close()
 
diff --git a/FundraiserStatisticsGen/mediumgen.py b/FundraiserStatisticsGen/mediumgen.py
index e860c1d..e6ddd9f 100644
--- a/FundraiserStatisticsGen/mediumgen.py
+++ b/FundraiserStatisticsGen/mediumgen.py
@@ -5,6 +5,7 @@
 from optparse import OptionParser
 from ConfigParser import SafeConfigParser
 
+
 def main():
     # Extract any command line options
     parser = OptionParser(usage="usage: %prog [options] <working directory>")
@@ -77,7 +78,7 @@
     return data
 
 
-def createSingleOutFile(stats, firstcols, filename, colnames = None):
+def createSingleOutFile(stats, firstcols, filename, colnames=None):
     """
     Creates a single report file from a keyed dict
 
diff --git a/FundraiserStatisticsGen/methodgen.py b/FundraiserStatisticsGen/methodgen.py
index 5d86788..c8cea1a 100644
--- a/FundraiserStatisticsGen/methodgen.py
+++ b/FundraiserStatisticsGen/methodgen.py
@@ -5,6 +5,7 @@
 from optparse import OptionParser
 from ConfigParser import SafeConfigParser
 
+
 def main():
     # Extract any command line options
     parser = OptionParser(usage="usage: %prog [options] <working directory>")
@@ -75,7 +76,7 @@
     return data
 
 
-def createSingleOutFile(stats, firstcols, filename, colnames = None):
+def createSingleOutFile(stats, firstcols, filename, colnames=None):
     """
     Creates a single report file from a keyed dict
 
diff --git a/FundraiserStatisticsGen/sizes_by_day.py b/FundraiserStatisticsGen/sizes_by_day.py
index 5cc579a..8c7e583 100644
--- a/FundraiserStatisticsGen/sizes_by_day.py
+++ b/FundraiserStatisticsGen/sizes_by_day.py
@@ -1,20 +1,20 @@
 #!/usr/bin/env python
 
-# FIXME: dayoffiscalyear
+# FIXME: dayoffiscalyear, parameterize all this
 start_time = "20120701"
 end_time = "20130701"
-#start_time = "20060101"
-#end_time = "20140101"
+# start_time = "20060101"
+# end_time = "20140101"
 ranges = [
     [0, 10],
-    [10,30],
-    [30,50],
-    [50,100],
-    [100,200],
-    [200,1000],
-    [1000,2500],
-    [2500,10000],
-    [10000,1000000000]
+    [10, 30],
+    [30, 50],
+    [50, 100],
+    [100, 200],
+    [200, 1000],
+    [1000, 2500],
+    [2500, 10000],
+    [10000, 1000000000]
 ]
 
 amount_slices_cols = ", ".join([
diff --git a/GeonameUpdater/geonames_updater.py b/GeonameUpdater/geonames_updater.py
index 68f4ed7..6141fdb 100644
--- a/GeonameUpdater/geonames_updater.py
+++ b/GeonameUpdater/geonames_updater.py
@@ -3,7 +3,7 @@
 from ConfigParser import SafeConfigParser
 from optparse import OptionParser
 import dateutil.parser
-#import pytz
+# import pytz
 import MySQLdb as MySQL
 import sys
 import os
diff --git a/SquidRipper/aggByTime.py b/SquidRipper/aggByTime.py
index abefe1c..7d7392d 100644
--- a/SquidRipper/aggByTime.py
+++ b/SquidRipper/aggByTime.py
@@ -5,6 +5,7 @@
 from dateutil.parser import parse as dateParse
 from datetime import datetime
 
+
 def main():
     # === Extract options ===
     parser = OptionParser(usage="usage: %prog [options] <timeColumn> <timeInterval> <groupByColumn> ...")
@@ -52,9 +53,9 @@
             colVals.append(parts[i])
         colVals = tuple(colVals)
 
-        if not ctime in data:
+        if ctime not in data:
             data[ctime] = {}
-        if not colVals in data[ctime]:
+        if colVals not in data[ctime]:
             data[ctime][colVals] = 1
         else:
             data[ctime][colVals] += 1
diff --git a/SquidRipper/logparser.py b/SquidRipper/logparser.py
index 7976610..d08ec4f 100644
--- a/SquidRipper/logparser.py
+++ b/SquidRipper/logparser.py
@@ -119,13 +119,12 @@
     elementDict = {}
     for element in elements:
         try:
-            (k,v) = element.split('=')
+            (k, v) = element.split('=')
         except ValueError:
             k = element
             v = None
         elementDict[k] = v
     return elementDict
-
 
 
 if __name__ == "__main__":
diff --git a/SquidRipper/squidder.py b/SquidRipper/squidder.py
index 46d440e..df5114a 100644
--- a/SquidRipper/squidder.py
+++ b/SquidRipper/squidder.py
@@ -10,6 +10,7 @@
 # NOTE: This script requires a set of WMF PyBal configuration files. These may be obtained
 # from fenari:/h/w/conf/pybal.conf. For more details talk to paravoid.
 
+
 def main():
     # === Extract options ===
     parser = OptionParser(usage="usage: %prog [options]")
diff --git a/audit/globalcollect/history.py b/audit/globalcollect/history.py
index 931d8b5..4911edb 100755
--- a/audit/globalcollect/history.py
+++ b/audit/globalcollect/history.py
@@ -16,10 +16,11 @@
 options = None
 args = None
 
+
 def main():
     global config, options, db
     parser = OptionParser(usage="usage: %prog [options]")
-    parser.add_option("-c", "--config", dest='configFile', default=[ "globalcollect-audit.cfg" ], action='append', help='Path to configuration file')
+    parser.add_option("-c", "--config", dest='configFile', default=["globalcollect-audit.cfg"], action='append', help='Path to configuration file')
     parser.add_option("-f", "--auditFile", dest='auditFile', default=None, 
help='CSV of transaction history')
     parser.add_option('-l', "--logFile", dest='logFile', default="audit.log", 
help='Destination logfile. New messages will be appended.')
     (options, args) = parser.parse_args()
@@ -34,7 +35,7 @@
 
     for line in infile:
         # TODO parse and filter on status ids
-        #if line["Status Description"] is not "COLLECTED":
+        # if line["Status Description"] is not "COLLECTED":
 
         normalized = {
             'transaction_id': line["Order ID"],
@@ -43,7 +44,7 @@
             'received': line["Received Date"],
             # GC has multiple time formats...
             'time_format': "%Y-%m-%d %H:%i",
-            #'time_format': "%c/%e/%y %k:%i",
+            # 'time_format': "%c/%e/%y %k:%i",
         }
 
         sql = """
@@ -61,6 +62,7 @@
 
 log_file = None
 
+
 def log(msg):
     global options, log_file
     if not log_file:
diff --git a/audit/paypal/SarFile.py b/audit/paypal/SarFile.py
index 795c43c..f86e8e6 100644
--- a/audit/paypal/SarFile.py
+++ b/audit/paypal/SarFile.py
@@ -9,8 +9,9 @@
 import ppreport
 from civicrm.civicrm import Civicrm
 
+
 class SarFile(object):
-    VERSION=2
+    VERSION = 2
     redis = None
     column_headers = [
         "Column Type",
@@ -63,7 +64,7 @@
 
         missing_fields = []
         for field in required_fields:
-            if not field in row or row[field] == '':
+            if field not in row or row[field] == '':
                 missing_fields.append(field)
         if missing_fields:
             raise RuntimeError("Message is missing some important fields: [{fields}]".format(fields=", ".join(missing_fields)))
diff --git a/audit/paypal/TrrFile.py b/audit/paypal/TrrFile.py
index 6522950..1bbed11 100644
--- a/audit/paypal/TrrFile.py
+++ b/audit/paypal/TrrFile.py
@@ -13,6 +13,7 @@
 from civicrm.civicrm import Civicrm
 from paypal_api import PaypalApiClassic
 
+
 class TrrFile(object):
     VERSION = [4, 8]
     redis = None
diff --git a/audit/paypal/history.py b/audit/paypal/history.py
index a62fa04..6d46b52 100755
--- a/audit/paypal/history.py
+++ b/audit/paypal/history.py
@@ -18,10 +18,11 @@
 civi = None
 log_file = None
 
+
 def main():
     global config, messaging, options, civi
     parser = OptionParser(usage="usage: %prog [options]")
-    parser.add_option("-c", "--config", dest='configFile', default=[ "paypal-audit.cfg" ], action='append', help='Path to configuration file')
+    parser.add_option("-c", "--config", dest='configFile', default=["paypal-audit.cfg"], action='append', help='Path to configuration file')
     parser.add_option("-f", "--auditFile", dest='auditFile', default=None, help='CSV of transaction history')
     parser.add_option('-l', "--logFile", dest='logFile', default="audit.log", help='Destination logfile. New messages will be appended.')
     parser.add_option("-n", "--no-effect", dest='noEffect', default=False, action="store_true", help="Dummy no-effect mode")
@@ -46,7 +47,7 @@
     locale.setlocale(locale.LC_NUMERIC, "")
 
     # fix spurious whitespace around column header names
-    infile.fieldnames = [ name.strip() for name in infile.fieldnames ]
+    infile.fieldnames = [name.strip() for name in infile.fieldnames]
 
     ignore_types = [
         "Authorization",
@@ -82,8 +83,10 @@
         else:
             handle_unknown(line)
 
+
 def handle_unknown(line):
     log("Unhandled transaction, type \"%s\": %s" % (line['Type'], 
json.dumps(line)))
+
 
 def handle_refund(line):
     global config, messaging, civi
@@ -104,6 +107,7 @@
     else:
         log("Refund already exists: %s" % (txn_id, ))
 
+
 def handle_payment(line):
     global config, messaging, civi
 
@@ -120,6 +124,7 @@
         messaging.send("payment", msg)
     else:
         log("Payment already exists: %s" % (txn_id, ))
+
 
 def normalize_msg(line):
     timestamp = dateutil.parser.parse(
@@ -153,6 +158,7 @@
         'gateway_txn_id': line['Transaction ID'],
     }
 
+
 def normalize_refund_msg(line):
     msg = normalize_msg(line)
 
diff --git a/audit/paypal/paypal_api.py b/audit/paypal/paypal_api.py
index a2974f7..327d7b6 100644
--- a/audit/paypal/paypal_api.py
+++ b/audit/paypal/paypal_api.py
@@ -30,11 +30,11 @@
         handlers = []
 
         # just for debugging DEBUGGING...
-        #httplib.HTTPConnection.debuglevel = 3
-        #httplib.HTTPSConnection.debuglevel = 3
+        # httplib.HTTPConnection.debuglevel = 3
+        # httplib.HTTPSConnection.debuglevel = 3
 
         if 'certificate_path' in config.api:
-            #handlers.append(HTTPSClientAuthHandler(config.api.certificate_path, config.api.certificate_path, debuglevel=2))
+            # handlers.append(HTTPSClientAuthHandler(config.api.certificate_path, config.api.certificate_path, debuglevel=2))
             handlers.append(HTTPSClientAuthHandler(config.api.certificate_path, config.api.certificate_path))
 
         opener = urllib2.build_opener(*handlers)
@@ -44,8 +44,8 @@
 
         return result
 
-# from http://stackoverflow.com/questions/1875052/using-paired-certificates-with-urllib2
 
+# from http://stackoverflow.com/questions/1875052/using-paired-certificates-with-urllib2
 class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
     def __init__(self, key, cert, **kw):
         urllib2.HTTPSHandler.__init__(self, **kw)
diff --git a/audit/paypal/ppreport.py b/audit/paypal/ppreport.py
index 78d7090..b18435a 100644
--- a/audit/paypal/ppreport.py
+++ b/audit/paypal/ppreport.py
@@ -9,12 +9,14 @@
     quotechar='"'
 )
 
+
 def read(path, version, callback, column_headers):
     try:
         read_encoded(path, version, callback, column_headers, encoding='utf-16')
     except UnicodeError:
         read_encoded(path, version, callback, column_headers, encoding='utf-8-sig')
 
+
 def read_encoded(path, version, callback, column_headers, encoding):
     # Coerce to a list
     if not hasattr(version, 'extend'):
diff --git a/audit/paypal/unicode_csv_reader.py b/audit/paypal/unicode_csv_reader.py
index de1d964..5d6ff3b 100644
--- a/audit/paypal/unicode_csv_reader.py
+++ b/audit/paypal/unicode_csv_reader.py
@@ -1,5 +1,6 @@
 import csv
 
+
 # from http://docs.python.org/2/library/csv.html
 def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
     # csv.py doesn't do Unicode; encode temporarily as UTF-8:
@@ -9,6 +10,7 @@
         # decode UTF-8 back to Unicode, cell by cell:
         yield [unicode(cell, 'utf-8') for cell in row]
 
+
 def utf_8_encoder(unicode_csv_data):
     for line in unicode_csv_data:
         yield line.encode('utf-8')
diff --git a/civicrm/civicrm.py b/civicrm/civicrm.py
index 0fc55d4..91c4a48 100644
--- a/civicrm/civicrm.py
+++ b/civicrm/civicrm.py
@@ -1,5 +1,6 @@
 from database.db import Connection
 
+
 class Civicrm(object):
     def __init__(self, config):
         self.db = Connection(**dict(config))
diff --git a/civicrm/tag.py b/civicrm/tag.py
index b6444fa..37ef193 100644
--- a/civicrm/tag.py
+++ b/civicrm/tag.py
@@ -1,6 +1,7 @@
 from process.globals import config
 from database import db
 
+
 class Tag(object):
     cache = {}
 
diff --git a/database/db.py b/database/db.py
index ec8c41d..92f229b 100644
--- a/database/db.py
+++ b/database/db.py
@@ -20,7 +20,7 @@
     def close(self):
         self.db_conn.commit()
 
-    def execute(self, sql, params=None, timeout = 0):
+    def execute(self, sql, params=None, timeout=0):
         cursor = self.db_conn.cursor(cursorclass=Dbi.cursors.DictCursor)
         deathClock = None
 
@@ -41,8 +41,8 @@
                 cursor.execute(sql.uninterpolated_sql(), sql.params)
             else:
                 cursor.execute(str(sql))
-            #for row in cursor.fetchall():
-            #  yield row
+            # for row in cursor.fetchall():
+            #     yield row
             out = cursor.fetchall()
             cursor.close()
             return out
@@ -57,7 +57,7 @@
         cursor.execute('KILL CONNECTION {}'.format(self.connection_id))
         killerConnection.close()
 
-    def execute_paged(self, query, pageIndex, pageSize = 1000, dir = 'ASC'):
+    def execute_paged(self, query, pageIndex, pageSize=1000, dir='ASC'):
         """ Execute a paged query. This will yield a dictionary of the results
         until there are no more results to yield. The pageIndex will be added
         to the order by automatically. If the Query already has a limit, it will
@@ -101,7 +101,6 @@
             else:
                 query.where.append("%s < %%(lastId)s" % (pageIndex))
 
-
     def last_insert_id(self):
         return self.db_conn.insert_id()
 
@@ -141,6 +140,7 @@
 
 db_conn = dict()
 
+
 def get_db(schema=None):
     '''Convenience'''
     global db_conn
@@ -155,9 +155,11 @@
 
     return db_conn[schema]
 
+
 def close_all():
     for conn in db_conn.values():
         conn.close()
+
 
 def handle_sigterm(signum, stack_frame):
     close_all()
@@ -166,4 +168,3 @@
 
 atexit.register(close_all)
 signal(SIGTERM, handle_sigterm)
-
diff --git a/failmail/mailer.py b/failmail/mailer.py
index 5ff4fae..6a468a5 100644
--- a/failmail/mailer.py
+++ b/failmail/mailer.py
@@ -7,6 +7,7 @@
 from process.globals import config
 from process.logging import Logger as log
 
+
 class FailMailer(object):
     @staticmethod
     def mail(errorcode, data=None, print_exception=False):
diff --git a/fundraising_ab_tests/campaign_log.py b/fundraising_ab_tests/campaign_log.py
index 04f7f99..2e9fa53 100644
--- a/fundraising_ab_tests/campaign_log.py
+++ b/fundraising_ab_tests/campaign_log.py
@@ -1,6 +1,7 @@
 from fundraising_ab_tests.fundraising_experiment import FrTest
 from process.globals import config
 
+
 def tests_from_entry(entry):
     '''
     Returns a tuple, (test ended, test begun)
@@ -37,6 +38,7 @@
 
     return (test_from_entry('begin'), test_from_entry('end'), )
 
+
 def get_relevant_events():
     from mediawiki.centralnotice.api import get_campaign_logs
     from mediawiki.centralnotice import time_util
@@ -49,4 +51,4 @@
             return True
 
     logs = get_campaign_logs(since=time_util.str_time_offset(days=-config.centralnotice_history_days))
-    return [ tests_from_entry(e) for e in reversed(logs) if is_relevant(e) ]
+    return [tests_from_entry(e) for e in reversed(logs) if is_relevant(e)]
diff --git a/fundraising_ab_tests/confidence.py b/fundraising_ab_tests/confidence.py
index e78c1dd..5b6a464 100644
--- a/fundraising_ab_tests/confidence.py
+++ b/fundraising_ab_tests/confidence.py
@@ -1,6 +1,7 @@
 from process.globals import config
 from stats.stats_abba import Experiment
 
+
 def add_confidence(results, name_column, successes_column):
     confidence = get_confidence(results, name_column, successes_column)
     if confidence:
@@ -12,6 +13,7 @@
     results[0].results.update({
         'confidencelink': get_confidence_link(results, name_column, successes_column)
     })
+
 
 def get_confidence(results, name_column=None, successes_column=None, trials=None):
     num_test_cases = len(results)
@@ -35,11 +37,11 @@
         baseline_num_trials=config.fudge_trials,
         confidence_level=config.confidence_level
     )
-    #useMultipleTestCorrection=true
+    # useMultipleTestCorrection=true
 
     cases = []
     for result in results:
-        #name = result.results[name_column]
+        # name = result.results[name_column]
         successes = result.results[successes_column]
         if hasattr(trials, 'encode'):
             trials = result.results[trials]
@@ -50,6 +52,7 @@
 
     return cases
 
+
 def get_confidence_link(results, name_column, successes_column):
     cases = []
     for result in results:
@@ -58,10 +61,9 @@
             continue
         name = result.results[name_column]
         successes = result.results[successes_column]
-        cases.append( "%s=%s,%s" % (name, successes, config.fudge_trials) )
+        cases.append("%s=%s,%s" % (name, successes, config.fudge_trials))
 
     return "http://www.thumbtack.com/labs/abba/#%(cases)s&abba:intervalConfidenceLevel=%(confidence)s&abba:useMultipleTestCorrection=true" % {
         'cases': "&".join(cases),
         'confidence': config.confidence_level,
     }
-
diff --git a/fundraising_ab_tests/fundraising_experiment.py b/fundraising_ab_tests/fundraising_experiment.py
index a200024..9ee2e77 100644
--- a/fundraising_ab_tests/fundraising_experiment.py
+++ b/fundraising_ab_tests/fundraising_experiment.py
@@ -3,9 +3,10 @@
 from process.globals import config
 from results import get_banner_results
 
+
 class FrTest(object):
     """Single N-way test
-    
+
     Currently, only banner tests are supported."""
 
     def __init__(self, label=None, type="", campaign=None, banners=None, start=None, end=None, disabled=False, **ignore):
@@ -26,13 +27,13 @@
             self.is_banner_test = True
             if banners:
                 if hasattr(banners, 'strip'):
-                    banners = [ s.strip() for s in banners.split(",") ]
+                    banners = [s.strip() for s in banners.split(",")]
                 self.banners = banners
             else:
                 if self.campaign['banners']:
                     self.banners = self.campaign['banners'].keys()
 
-            #self.variations = [ FrTestVariation(banner=name) for name in self.banners ]
+            # self.variations = [ FrTestVariation(banner=name) for name in self.banners ]
 
         self.is_country_test = (self.type.count('country') > 0)
         self.is_lp_test = (self.type.count('lp') > 0)
@@ -64,8 +65,8 @@
             self.results.extend(get_banner_results(cases))
 
         if self.is_country_test:
-            #results = [ calculate_result(country=code) for code in campaign['countries'] ]
-            #self.results.extend(results)
+            # results = [ calculate_result(country=code) for code in campaign['countries'] ]
+            # self.results.extend(results)
             log.warn("country test type not implemented")
 
         if self.is_lp_test:
@@ -99,4 +100,4 @@
             description += " lps: " + str(self.lps)
         return description
 
-#class FrTestVariation(object):
+# class FrTestVariation(object):
diff --git a/fundraising_ab_tests/results.py b/fundraising_ab_tests/results.py
index 1de915c..6fe610f 100644
--- a/fundraising_ab_tests/results.py
+++ b/fundraising_ab_tests/results.py
@@ -1,14 +1,13 @@
 import json
-import re
-
 from process.globals import config
 from mediawiki.centralnotice.contributions import get_totals
 from mediawiki.centralnotice.impressions import get_impressions
 from fundraising_ab_tests.confidence import add_confidence
 
+
 class TestResult(object):
     """Container for a test's results
-    
+
     TODO: fix single-responsibility issue with criteria"""
     def __init__(self, criteria=None, results={}):
         self.criteria = criteria
@@ -24,11 +23,12 @@
 
 
 def get_banner_results(cases):
-    results = [ banner_results(case) for case in cases ]
+    results = [banner_results(case) for case in cases]
 
     add_confidence(results, 'banner', 'donations')
 
     return results
+
 
 def banner_results(criteria):
     """Helper which retrieves performance statistics for the given test 
criteria"""
@@ -36,16 +36,16 @@
     impressions = get_impressions(**criteria)
 
     # FIXME: refactor to a variations hook
-    #match = re.match(config.fr_banner_naming, criteria['banner'])
-    #if match:
-    #    results.update({
-    #        'label': match.group("testname"),
-    #        'language': match.group("language"),
-    #        'variation': match.group("variation"),
-    #        'dropdown': match.group("dropdown") is "dr",
-    #        'country': match.group("country"),
+    # match = re.match(config.fr_banner_naming, criteria['banner'])
+    # if match:
+    #     results.update({
+    #         'label': match.group("testname"),
+    #         'language': match.group("language"),
+    #         'variation': match.group("variation"),
+    #         'dropdown': match.group("dropdown") is "dr",
+    #         'country': match.group("country"),
 
-    #    })
+    #     })
 
     # Get example locales, to help generate valid links
     language = criteria['languages'][0]
diff --git a/fundraising_ab_tests/results_gdoc.py b/fundraising_ab_tests/results_gdoc.py
index 4c3c333..ece29bf 100644
--- a/fundraising_ab_tests/results_gdoc.py
+++ b/fundraising_ab_tests/results_gdoc.py
@@ -1,6 +1,7 @@
 from google.gdocs import Spreadsheet
 from process.logging import Logger as log
 
+
 def write_gdoc_results(doc=None, results=[]):
     log.info("Writing test results to {url}".format(url=doc))
     doc = Spreadsheet(doc=doc)
@@ -10,6 +11,7 @@
         props.update(result['results'])
         doc.append_row(props)
 
+
 def update_gdoc_results(doc=None, results=[]):
     log.info("Updating results in {url}".format(url=doc))
     doc = Spreadsheet(doc=doc)
diff --git a/fundraising_ab_tests/spec.py b/fundraising_ab_tests/spec.py
index d442dcb..682bd17 100644
--- a/fundraising_ab_tests/spec.py
+++ b/fundraising_ab_tests/spec.py
@@ -13,16 +13,19 @@
 from process.globals import config
 from process.logging import Logger as log
 
+
 def parse_spec(spec):
     """Turn each row of a specification source into test objects"""
     for row in spec:
         yield FrTest(**row)
 
+
 def compare_test_fuzzy(a, b):
     """Check whether the tests match closely enough to be considered 
identical."""
     if a.campaign['name'] == b.campaign['name'] and a.banners == b.banners:
         return True
 
+
 def is_fr_test(test):
     if test.label and test.banners and test.campaign:
         is_chapter = re.search(config.fr_chapter_test, test.banners[0])
diff --git a/fundraising_ab_tests/spec_gdoc.py b/fundraising_ab_tests/spec_gdoc.py
index 2fda42a..af9d1b5 100644
--- a/fundraising_ab_tests/spec_gdoc.py
+++ b/fundraising_ab_tests/spec_gdoc.py
@@ -2,10 +2,12 @@
 from google.gdocs import Spreadsheet
 from process.logging import Logger as log
 
+
 def read_gdoc_spec(doc=None):
     rows = list(Spreadsheet(doc=doc).get_all_rows())
     return FrTestSpec(spec=list(parse_spec(rows)))
 
+
 def update_gdoc_spec(doc=None, spec=None):
     log.info("Updating test specs with latest CentralNotice changes... 
{url}".format(url=doc))
 
diff --git a/google/gdocs.py b/google/gdocs.py
index c987b12..cc864dd 100644
--- a/google/gdocs.py
+++ b/google/gdocs.py
@@ -11,14 +11,15 @@
 # TODO: ExecuteBatch; 2-leg oauth
 # TODO: cache rows locally, operate and then flush
 
+
 def authenticate(client):
-    #client.SetOAuthInputParameters(
-    #    gdata.auth.OAuthSignatureMethod.HMAC_SHA1,
-    #    consumer_key=config.gdocs['consumer_key'],
-    #    consumer_secret=config.gdocs['consumer_secret'],
-    #    two_legged_oauth=True,
-    #    requestor_id=config.gdocs['email']
-    #)
+    # client.SetOAuthInputParameters(
+    #     gdata.auth.OAuthSignatureMethod.HMAC_SHA1,
+    #     consumer_key=config.gdocs['consumer_key'],
+    #     consumer_secret=config.gdocs['consumer_secret'],
+    #     two_legged_oauth=True,
+    #     requestor_id=config.gdocs['email']
+    # )
     client.ClientLogin(
         config.gdocs['email'],
         config.gdocs['passwd'],
@@ -26,14 +27,15 @@
     )
     client.ssl = True
 
-def new_doc( title ):
+
+def new_doc(title):
     '''
     return doc_key
     '''
-    client = gdata.docs.service.DocsService( email=config.gdocs['email'], source=config.app_name )
-    authenticate( client )
+    client = gdata.docs.service.DocsService(email=config.gdocs['email'], source=config.app_name)
+    authenticate(client)
 
-    #entry = gdata.docs.data.Resource( type='spreadsheet', title=title, collection='bot: FR' )
+    # entry = gdata.docs.data.Resource(type='spreadsheet', title=title, collection='bot: FR')
     entry = client.Upload(
         gdata.MediaSource(
             file_name=title,
@@ -45,9 +47,10 @@
 
     return entry.id.text.rsplit('%3A')[-1]
 
+
 class Spreadsheet(object):
     def __init__(self, doc=None):
-        self.client = gdata.spreadsheet.service.SpreadsheetsService( source=config.app_name )
+        self.client = gdata.spreadsheet.service.SpreadsheetsService(source=config.app_name)
         authenticate(self.client)
 
         if doc:
@@ -56,7 +59,7 @@
             self.doc_key = doc.doc_key
             self.worksheet_id = doc.worksheet_id
         else:
-            self.doc_key = new_doc( 'test1' )
+            self.doc_key = new_doc('test1')
 
         if self.worksheet_id is None:
             self.worksheet_id = self.default_worksheet()
@@ -65,23 +68,23 @@
         '''
         return worksheet id
         '''
-        wk_feed = self.client.GetWorksheetsFeed( self.doc_key )
+        wk_feed = self.client.GetWorksheetsFeed(self.doc_key)
         return wk_feed.entry[0].id.text.rsplit('/')[-1]
 
     def render_headers(self, columns):
         # TODO: set extension cell type and format
         for i, name in enumerate(columns, 1):
-            cur = self.get_cell( (1, i) )
+            cur = self.get_cell((1, i))
             if cur and cur != name:
                 raise Exception("Unexpected header in location (%d, %d): %s" % 
(1, i, cur,))
-            self.client.UpdateCell( 1, i, name, self.doc_key, 
self.worksheet_id )
+            self.client.UpdateCell(1, i, name, self.doc_key, self.worksheet_id)
 
     def num_rows(self):
-        feed = self.client.GetListFeed( self.doc_key, wksht_id=self.worksheet_id )
-        #FIXME: race condition
-        return len( feed.entry ) + 1
+        feed = self.client.GetListFeed(self.doc_key, wksht_id=self.worksheet_id)
+        # FIXME: race condition
+        return len(feed.entry) + 1
 
-    def append_row( self, row ):
+    def append_row(self, row):
         rendered = {}
         for key, e in row.items():
             if e is None:
@@ -89,7 +92,7 @@
             if not hasattr(e, 'decode'):
                 e = str(e)
             rendered[key] = e
-        self.client.InsertRow( rendered, self.doc_key, self.worksheet_id )
+        self.client.InsertRow(rendered, self.doc_key, self.worksheet_id)
 
     def update_row(self, props, index=None, matching=None):
         if matching:
@@ -97,7 +100,7 @@
             # if len(matches) > 1:
             #   raise Exception
             pass
         feed = self.client.GetListFeed( self.doc_key, wksht_id=self.worksheet_id )
+        feed = self.client.GetListFeed(self.doc_key, wksht_id=self.worksheet_id)
         entry = feed.entry[index - 1]
         for k, v in props.items():
             if k in entry.custom:
@@ -106,10 +109,10 @@
             if a_link.rel == 'edit':
                 self.client.Put(entry, a_link.href)
 
-    def set_cell( self, addr, data ):
-        self.client.UpdateCell( addr[0], addr[1], data, self.doc_key, self.worksheet_id )
+    def set_cell(self, addr, data):
+        self.client.UpdateCell(addr[0], addr[1], data, self.doc_key, self.worksheet_id)
 
-    def get_cell( self, addr ):
+    def get_cell(self, addr):
         feed = self.client.GetCellsFeed(
             self.doc_key,
             wksht_id=self.worksheet_id,
@@ -117,8 +120,8 @@
         )
         return feed.text
 
-    def get_row( self, row ):
-        feed = self.client.GetListFeed( self.doc_key, wksht_id=self.worksheet_id )
+    def get_row(self, row):
+        feed = self.client.GetListFeed(self.doc_key, wksht_id=self.worksheet_id)
         if row > len(feed.entry):
             return None
         ret = {}
@@ -126,14 +129,14 @@
             ret[key] = value.text
         return ret
 
-    def rc_addr( self, addr ):
-        return "R%dC%d" % ( addr[0], addr[1], )
+    def rc_addr(self, addr):
+        return "R%dC%d" % (addr[0], addr[1],)
 
     def get_all_rows(self):
         '''
         Dump entire spreadsheet and return as a list of dicts
         '''
-        feed = self.client.GetListFeed( self.doc_key, wksht_id=self.worksheet_id )
+        feed = self.client.GetListFeed(self.doc_key, wksht_id=self.worksheet_id)
         for line in feed.entry:
             row = {}
             for key, value in line.custom.items():
@@ -155,5 +158,5 @@
             self.worksheet_id = worksheet_id
 
     def __repr__(self):
-        #FIXME doc types
+        # FIXME doc types
         return "https://docs.google.com/spreadsheet/ccc?key=%s#gid=%d"; % 
(self.doc_key, self.worksheet_id - 1)
diff --git a/live_analysis/dump_tests.py b/live_analysis/dump_tests.py
index a3dab29..ba6599d 100755
--- a/live_analysis/dump_tests.py
+++ b/live_analysis/dump_tests.py
@@ -23,13 +23,14 @@
     if 'enabled' in entry['added'] or entry['begin']['enabled'] is 1:
         return True
 
+
 def fetch():
     out = csv.DictWriter(sys.stdout, [
         'campaign',
         'banner',
         'start',
         'end',
-        #FIXME: 'lps',
+        # FIXME: 'lps',
     ], delimiter="\t")
 
     out.writeheader()
@@ -43,12 +44,12 @@
         for test in logs:
             if is_relevant(test):
                 for banner in test['end']['banners'].keys():
-                    out.writerow( {
+                    out.writerow({
                         'campaign': test['campaign'].encode('utf-8'),
                         'banner': banner.encode('utf-8'),
                         'start': test['end']['start'],
                         'end': test['end']['end'],
-                    } )
+                    })
 
         if not logs:
             break
diff --git a/mediawiki/api.py b/mediawiki/api.py
index 49ea188..d8b9369 100644
--- a/mediawiki/api.py
+++ b/mediawiki/api.py
@@ -13,10 +13,10 @@
         api,
         user_agent='bot: fr-anal'
     )
-    result = wiki.call( args )
+    result = wiki.call(args)
     if 'error' in result:
         raise RuntimeError(json.dumps(result, indent=4).replace('\\n', '\n'))
-    val = result[ args['action'] ]
+    val = result[args['action']]
     if 'list' in args:
-        val = val[ args['list'] ]
+        val = val[args['list']]
     return val
diff --git a/mediawiki/centralnotice/api.py b/mediawiki/centralnotice/api.py
index 9589a4a..67c6cb4 100644
--- a/mediawiki/centralnotice/api.py
+++ b/mediawiki/centralnotice/api.py
@@ -6,38 +6,42 @@
 
 cached_campaigns = {}
 
-def get_banners( **kw ):
-    if 'campaign' in kw:
-        campaign = get_campaign( kw['campaign'] )
-        return campaign['banners'].keys()
-    return get_allocations( **kw )
 
-def get_campaign( campaign ):
-    #TODO: push caching down into mediawiki.mw_call, with optional invalidation
+def get_banners(**kw):
+    if 'campaign' in kw:
+        campaign = get_campaign(kw['campaign'])
+        return campaign['banners'].keys()
+    return get_allocations(**kw)
+
+
+def get_campaign(campaign):
+    # TODO: push caching down into mediawiki.mw_call, with optional invalidation
     global cached_campaigns
     if campaign in cached_campaigns:
         return cached_campaigns[campaign]
 
-    #if '__iter__' in campaign: return get_campaigns
-    result = mw_call( {
+    # if '__iter__' in campaign: return get_campaigns
+    result = mw_call({
         'action': 'centralnoticequerycampaign',
         'campaign': campaign,
-    } )
+    })
 
     if campaign in result:
         result[campaign]['name'] = campaign
         cached_campaigns[campaign] = result[campaign]
         return cached_campaigns[campaign]
 
-def get_campaigns( campaigns ):
-    #FIXME cache
-    return mw_call( {
-        'action': 'centralnoticequerycampaign',
-        'campaign': '|'.join( campaigns ),
-    } )
 
-def get_allocations( project=None, language=None, country=None, anonymous=True, bucket='0' ): 
-    result = mw_call( {
+def get_campaigns(campaigns):
+    # FIXME cache
+    return mw_call({
+        'action': 'centralnoticequerycampaign',
+        'campaign': '|'.join(campaigns),
+    })
+
+
+def get_allocations(project=None, language=None, country=None, anonymous=True, bucket='0'):
+    result = mw_call({
         'action': 'centralnoticeallocations',
         'project': project,
         'language': language,
@@ -45,10 +49,11 @@
         'anonymous': anonymous,
         'bucket': bucket,
         'minimal': 'false'
-    } )
+    })
     return result['banners']
 
-def get_campaign_logs( since=None, limit=50, offset=0 ):
+
+def get_campaign_logs(since=None, limit=50, offset=0):
     params = {
         'action': 'query',
         'list': 'centralnoticelogs',
@@ -58,5 +63,5 @@
     if since:
         params['start'] = since
 
-    result = mw_call( params )
+    result = mw_call(params)
     return result['logs']
diff --git a/mediawiki/centralnotice/contributions.py b/mediawiki/centralnotice/contributions.py
index bf19e2b..d7c604a 100644
--- a/mediawiki/centralnotice/contributions.py
+++ b/mediawiki/centralnotice/contributions.py
@@ -26,13 +26,14 @@
 
     change = None
     if ref_totals['total']:
-        change = round( 100 * cur_totals['total'] / ref_totals['total'] - 100, 1 )
+        change = round(100 * cur_totals['total'] / ref_totals['total'] - 100, 1)
     cur_totals['frac_change'] = change
 
     return cur_totals
 
-#FIXME: instead of ignoring args, intersect the criteria during update
-def get_totals(wheres = None, query=None, banner=None, campaign=None, country=None, start=None, end=None, **ignore):
+
+# FIXME: instead of ignoring args, intersect the criteria during update
+def get_totals(wheres=None, query=None, banner=None, campaign=None, country=None, start=None, end=None, **ignore):
     '''
     Note that the column names must match a heading in the results spreadsheet.
     '''
@@ -55,7 +56,7 @@
         query.where.append("utm_campaign = %(campaign)s")
         query.params['campaign'] = campaign
     if banner:
-        query.columns.append( ct_banner_clause + " AS banner" )
+        query.columns.append(ct_banner_clause + " AS banner")
         query.where.append(ct_banner_clause + " = %(banner)s")
         query.params['banner'] = banner
     if country:
diff --git a/mediawiki/centralnotice/impressions.py b/mediawiki/centralnotice/impressions.py
index 217030c..fd0e404 100644
--- a/mediawiki/centralnotice/impressions.py
+++ b/mediawiki/centralnotice/impressions.py
@@ -1,6 +1,7 @@
 from database import db
 from process.globals import config
 
+
 def get_impressions(campaign=None, banner=None, **ignore):
     query = db.Query()
     query.columns.append("SUM(count) AS count")
diff --git a/mediawiki/centralnotice/time_util.py b/mediawiki/centralnotice/time_util.py
index 9f1463f..23ac50c 100644
--- a/mediawiki/centralnotice/time_util.py
+++ b/mediawiki/centralnotice/time_util.py
@@ -3,21 +3,26 @@
 '''
 from datetime import datetime, timedelta
 
+
 def str_time_offset(str_time=None, **delta_args):
     if not str_time:
         str_time = str_now()
-    time_time = datetime.strptime( str_time, '%Y%m%d%H%M%S' )
-    str_time = ( time_time + timedelta( **delta_args )).strftime( '%Y%m%d%H%M%S' )
+    time_time = datetime.strptime(str_time, '%Y%m%d%H%M%S')
+    str_time = (time_time + timedelta(**delta_args)).strftime('%Y%m%d%H%M%S')
     return(str_time)
 
-def str_now():
-    return( datetime.utcnow().strftime('%Y%m%d%H%M%S') )
 
-def datetimefunix( unix_timestamp ):
+def str_now():
+    return datetime.utcnow().strftime('%Y%m%d%H%M%S')
+
+
+def datetimefunix(unix_timestamp):
     return datetime.fromtimestamp(unix_timestamp)
 
-def strfunix( unix_timestamp ):
+
+def strfunix(unix_timestamp):
     return datetime.fromtimestamp(unix_timestamp).strftime('%Y-%m-%d %H:%M')
 
+
 def same_time_another_day(ref_day, time):
     return ref_day[:8] + time[-6:]
diff --git a/mediawiki/i18n.py b/mediawiki/i18n.py
index 0a32e9f..7af58fe 100644
--- a/mediawiki/i18n.py
+++ b/mediawiki/i18n.py
@@ -1,5 +1,6 @@
 from mediawiki.api import mw_call
 
+
 def get_languages(**kw):
     result = mw_call({
         'action': 'query',
diff --git a/mediawiki/page.py b/mediawiki/page.py
index 3bb9384..a540cdc 100644
--- a/mediawiki/page.py
+++ b/mediawiki/page.py
@@ -1,5 +1,6 @@
 from mediawiki.api import mw_call
 
+
 def get_content(title, **kw):
     result = mw_call({
         'action': 'query',
diff --git a/process/globals.py b/process/globals.py
index edb6aa5..4b7825b 100644
--- a/process/globals.py
+++ b/process/globals.py
@@ -6,6 +6,7 @@
 # n.b. Careful not to import `config` by value
 config = dict()
 
+
 def load_config(app_name):
     global config
 
@@ -30,15 +31,17 @@
 
         config.app_name = app_name
 
-        return
+        return config
 
     raise Exception("No config found, searched " + ", ".join(search_filenames))
+
 
 def get_config():
     """Procedural way to get the config, to workaround early bootstrapping 
fluctuations"""
     global config
     return config
 
+
 class DictAsAttrDict(dict):
     def __getattr__(self, name):
         value = self[name]
diff --git a/process/lock.py b/process/lock.py
index da0eef5..702e748 100644
--- a/process/lock.py
+++ b/process/lock.py
@@ -3,12 +3,14 @@
 
 Self-corrects stale locks unless "failopen" is True.
 '''
-import os, os.path
+import os
+import os.path
 import sys
 
 from logging import Logger as log
 
 lockfile = None
+
 
 def begin(filename=None, failopen=False):
     if not filename:
@@ -47,6 +49,7 @@
     global lockfile
     lockfile = filename
 
+
 def end():
     global lockfile
     if lockfile:
diff --git a/process/logging.py b/process/logging.py
index 9eb284e..cdf23d0 100644
--- a/process/logging.py
+++ b/process/logging.py
@@ -2,8 +2,8 @@
 import syslog
 import os.path
 
+
 class Logger(object):
-    
     @staticmethod
     def debug(message):
         Logger.log(message, syslog.LOG_DEBUG)
diff --git a/process/version_stamp.py b/process/version_stamp.py
index 480cf43..0da23f6 100644
--- a/process/version_stamp.py
+++ b/process/version_stamp.py
@@ -2,6 +2,7 @@
 
 cached_revision = None
 
+
 def source_revision():
     global cached_revision
 
diff --git a/queue/redis_wrap.py b/queue/redis_wrap.py
index b42bd90..bb90fc4 100644
--- a/queue/redis_wrap.py
+++ b/queue/redis_wrap.py
@@ -4,6 +4,7 @@
 import json
 import redis
 
+
 class Redis(object):
 
     conn = None
diff --git a/queue/tests/test_redis_wrap.py b/queue/tests/test_redis_wrap.py
index 7214e02..3f86185 100644
--- a/queue/tests/test_redis_wrap.py
+++ b/queue/tests/test_redis_wrap.py
@@ -2,6 +2,7 @@
 
 test_queue = "test_queue"
 
+
 @patch("redis.Redis")
 def test_send(MockPyRedis):
     data = dict(a=1)
diff --git a/queue/tests/test_stomp_wrap.py b/queue/tests/test_stomp_wrap.py
index 99a177e..3bf2d20 100644
--- a/queue/tests/test_stomp_wrap.py
+++ b/queue/tests/test_stomp_wrap.py
@@ -2,11 +2,12 @@
 
 from queue.stomp_wrap import Stomp
 
+
 def test_source_meta():
     meta = Stomp.source_meta()
-    assert meta['source_name'] != None
+    assert meta['source_name'] is not None
     assert 'audit' == meta['source_type']
     assert int(meta['source_run_id']) > 0
-    assert meta['source_version'] != None
+    assert meta['source_version'] is not None
     assert meta['source_enqueued_time'] >= (time.time() - 60)
-    assert meta['source_host'] != None
+    assert meta['source_host'] is not None
diff --git a/sftp/client.py b/sftp/client.py
index 8bf3a31..831a714 100644
--- a/sftp/client.py
+++ b/sftp/client.py
@@ -1,10 +1,12 @@
-import os, os.path
+import os
+import os.path
 import base64
 import paramiko
 import StringIO
 
 from process.logging import Logger as log
 from process.globals import config
+
 
 class Client(object):
     def __init__(self):
@@ -85,13 +87,14 @@
             if hasattr(config, 'panic_on_empty') and config.panic_on_empty:
                 raise RuntimeError("Stupid files did not download correctly.")
 
+
 def walk_files(paths):
     '''List all files under these path(s)
 
     Parameters
     ==========
     * paths - single or list of paths
-    
+
     Return value
     ============
     A list of all files found under the root directory(ies)
@@ -108,6 +111,7 @@
 
     return result
 
+
 def make_key(keystr=None):
     '''Cheesily detect a key string's type and create a Key object from it
 
diff --git a/silverpop_export/export.py b/silverpop_export/export.py
index e707efe..370a79d 100644
--- a/silverpop_export/export.py
+++ b/silverpop_export/export.py
@@ -71,6 +71,7 @@
     output.close()
     log.info("Wrote %d rows" % num_rows)
 
+
 def export_data(output_path=None):
     db = DbConnection(**config.silverpop_db)
 
diff --git a/silverpop_export/update.py b/silverpop_export/update.py
index 9524bfd..aed3177 100644
--- a/silverpop_export/update.py
+++ b/silverpop_export/update.py
@@ -18,7 +18,7 @@
     script_path = os.path.dirname(__file__)
     qbuf = [prefix]
     queries = []
-    f = open( os.path.join( script_path, file ), 'r' )
+    f = open(os.path.join(script_path, file), 'r')
     for line in f:
         line = line.rstrip()
         if line:
diff --git a/tox.ini b/tox.ini
index caa0248..c0ec12b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -17,4 +17,6 @@
 [flake8]
 # E501 line too long (X > 79 characters)
 ignore=E501
-exclude = .tox
+exclude =
+       .tox,
+       stats/stats_abba.py
diff --git a/whitelist/netdiff.py b/whitelist/netdiff.py
index da6d548..b76dbec 100755
--- a/whitelist/netdiff.py
+++ b/whitelist/netdiff.py
@@ -19,4 +19,3 @@
 
 for cidr in net.iter_cidrs():
     print(cidr)
-

-- 
To view, visit https://gerrit.wikimedia.org/r/314485
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I615fcfd186bd27286da7132d6f2e598dba046145
Gerrit-PatchSet: 1
Gerrit-Project: wikimedia/fundraising/tools
Gerrit-Branch: master
Gerrit-Owner: Awight <awi...@wikimedia.org>
