jenkins-bot has submitted this change and it was merged.

Change subject: fix some pep8 errors
......................................................................


fix some pep8 errors

46      E713 test for membership should be 'not in'
using a semi-automated script

Change-Id: I7ca12dbc3a0ec634d8ebea18637e110109aacb68
---
M archivebot.py
M category_redirect.py
M catlib.py
M copyright.py
M copyright_put.py
M cosmetic_changes.py
M data_ingestion.py
M featured.py
M generate_user_files.py
M interwiki.py
M makecat.py
M rcsort.py
M redirect.py
M reflinks.py
M replicate_wiki.py
M spamremove.py
M spellcheck.py
M splitwarning.py
M us-states.py
M wikipedia.py
M wiktionary.py
M xmlreader.py
22 files changed, 49 insertions(+), 49 deletions(-)

Approvals:
  Xqt: Looks good to me, approved
  jenkins-bot: Verified



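The commit message above mentions a semi-automated script. For background, pep8's E713 flags "not X in Y", which Python parses as "not (X in Y)" and which is therefore equivalent to, but less idiomatic than, "X not in Y". The actual script is not part of this change; the sketch below is a hypothetical reconstruction of such a helper (the regex, the name suggest_e713_fix, and the example are assumptions), written in Python like the patched files. Rewrites such as "not querydata['action'] == 'query'" to "querydata['action'] != 'query'" (see the category_redirect.py hunk below) still need a human eye, hence "semi-automated".

    # Hypothetical sketch of a semi-automated E713 fixer; NOT the actual
    # script used for this change. It proposes "X not in Y" rewrites for
    # simple "not X in Y" patterns and leaves anything it cannot match
    # conservatively (strings, comments, compound operands) for review.
    import re

    # One simple operand (name, attribute access, call, or literal)
    # between "not" and "in"; deliberately narrow so ambiguous code,
    # e.g. "not X in Y" inside a string literal, must be checked by hand.
    E713 = re.compile(r'''\bnot\s+([\w.'"()\[\]]+)\s+in\s+''')

    def suggest_e713_fix(line):
        """Return the proposed rewrite of one source line, or the line
        unchanged if no simple E713 pattern is found."""
        return E713.sub(r'\1 not in ', line)

    # Mirrors the first hunk below (archivebot.py):
    #   suggest_e713_fix("if not archive in self.archives:")
    #   returns "if archive not in self.archives:"
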
diff --git a/archivebot.py b/archivebot.py
index 3146732..c4fe7c4 100644
--- a/archivebot.py
+++ b/archivebot.py
@@ -565,7 +565,7 @@
            and not self.Page.title() + '/' == archive[:len(self.Page.title()) + 1] \
            and not self.key_ok():
             raise ArchiveSecurityError
-        if not archive in self.archives:
+        if archive not in self.archives:
             self.archives[archive] = DiscussionPage(archive, self, vars)
         return self.archives[archive].feedThread(thread, maxArchiveSize)
 
diff --git a/category_redirect.py b/category_redirect.py
index 6fae703..679915a 100644
--- a/category_redirect.py
+++ b/category_redirect.py
@@ -209,7 +209,7 @@
         querydata = {'action': 'query',
                      'maxlag': str(pywikibot.config.maxlag)}
         querydata = query.CombineParams(querydata, data)
-        if not "action" in querydata or not querydata['action'] == 'query':
+        if "action" not in querydata or querydata['action'] != 'query':
             raise ValueError(
                 "query_results: 'action' set to value other than 'query'")
         waited = 0
diff --git a/catlib.py b/catlib.py
index a222008..61fd798 100644
--- a/catlib.py
+++ b/catlib.py
@@ -155,12 +155,12 @@
                                                  sortdir, endsort):
                 if tag == ARTICLE:
                     self.articleCache.append(page)
-                    if not page in cache:
+                    if page not in cache:
                         cache.append(page)
                         yield ARTICLE, page
                 elif tag == SUBCATEGORY:
                     self.subcatCache.append(page)
-                    if not page in cache:
+                    if page not in cache:
                         cache.append(page)
                         yield SUBCATEGORY, page
                         if recurse:
@@ -594,7 +594,7 @@
 def add_category(article, category, comment=None, createEmptyPages=False):
     """Given an article and a category, adds the article to the category."""
     cats = article.categories(get_redirect=True)
-    if not category in cats:
+    if category not in cats:
         cats.append(category)
         try:
             text = article.get()
diff --git a/copyright.py b/copyright.py
index d293b8a..d4de5bb 100644
--- a/copyright.py
+++ b/copyright.py
@@ -436,7 +436,7 @@
     def sanity_check(self):
         print "Exclusion list sanity check..."
         for entry in self.URLlist:
-            if (not '.' in entry and not '/' in entry) or len(entry) < 5:
+            if ('.' not in entry and '/' not in entry) or len(entry) < 5:
                 print "** " + entry
 
     def dump(self):
@@ -613,7 +613,7 @@
 
 
 def mysplit(text, dim, sep):
-    if not sep in text:
+    if sep not in text:
         return [text]
     t = text
     l = list()
diff --git a/copyright_put.py b/copyright_put.py
index cfefab8..8ae0430 100644
--- a/copyright_put.py
+++ b/copyright_put.py
@@ -178,7 +178,7 @@
 
 def output_files_gen():
     for f in os.listdir(appdir):
-        if 'output' in f and not '_pending' in f:
+        if 'output' in f and '_pending' not in f:
             m = re.search('output_(.*?)\.txt', f)
             if m:
                 tag = m.group(1)
diff --git a/cosmetic_changes.py b/cosmetic_changes.py
index 2085ff0..528104c 100644
--- a/cosmetic_changes.py
+++ b/cosmetic_changes.py
@@ -245,9 +245,9 @@
         # German Wikipedia. See
         # http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22
         # ignoring nn-wiki of cause of the comment line above iw section
-        if not self.template and not '{{Personendaten' in text and \
-           not '{{SORTIERUNG' in text and not '{{DEFAULTSORT' in text and \
-           not self.site.lang in ('et', 'it', 'bg', 'ru'):
+        if not self.template and '{{Personendaten' not in text and \
+           '{{SORTIERUNG' not in text and '{{DEFAULTSORT' not in text and \
+           self.site.lang not in ('et', 'it', 'bg', 'ru'):
             try:
                 categories = pywikibot.getCategoryLinks(text, site=self.site)
             # there are categories like [[categoy:Foo {{#time:Y...}}]]
diff --git a/data_ingestion.py b/data_ingestion.py
index 4098399..be12237 100644
--- a/data_ingestion.py
+++ b/data_ingestion.py
@@ -191,11 +191,11 @@
             metadata = JSONTree(metadata, fieldlist + [k], v)
     elif type(record) == unicode:
         key = u'_'.join(fieldlist)
-        if not key in metadata:
+        if key not in metadata:
             metadata[key] = record
         else:
             newkey = key + u'_2'
-            if not newkey in metadata:
+            if newkey not in metadata:
                 metadata[newkey] = record
     return metadata
 
diff --git a/featured.py b/featured.py
index 230e054..67fd44b 100644
--- a/featured.py
+++ b/featured.py
@@ -461,9 +461,9 @@
 
 
 def featuredWithInterwiki(fromsite, tosite, template_on_top, pType, quiet):
-    if not fromsite.lang in cache:
+    if fromsite.lang not in cache:
         cache[fromsite.lang] = {}
-    if not tosite.lang in cache[fromsite.lang]:
+    if tosite.lang not in cache[fromsite.lang]:
         cache[fromsite.lang][tosite.lang] = {}
     cc = cache[fromsite.lang][tosite.lang]
     if nocache:
diff --git a/generate_user_files.py b/generate_user_files.py
index 8a5263d..d51f0fa 100644
--- a/generate_user_files.py
+++ b/generate_user_files.py
@@ -41,7 +41,7 @@
         except ValueError:
             pass
         if isinstance(choice, basestring):
-            if not choice in clist:
+            if choice not in clist:
                 print("Invalid response")
             else:
                 return choice
@@ -185,5 +185,5 @@
     if choice == "3":
         create_user_config('')
         create_user_fixes('')
-    if not choice in "123":
+    if choice not in "123":
         print("Nothing to do")
diff --git a/interwiki.py b/interwiki.py
index 14cc119..100044f 100644
--- a/interwiki.py
+++ b/interwiki.py
@@ -732,7 +732,7 @@
 
     def add(self, page):
         site = page.site
-        if not site in self.tree:
+        if site not in self.tree:
             self.tree[site] = []
         self.tree[site].append(page)
         self.size += 1
@@ -1226,7 +1226,7 @@
                     if newhint == '?':
                         t += globalvar.showtextlinkadd
                         pywikibot.output(self.originPage.get()[:t])
-                    elif newhint and not ':' in newhint:
+                    elif newhint and ':' not in newhint:
                         pywikibot.output(
                             u'Please enter a hint in the format '
                             u'language:pagename or type nothing if you do not '
diff --git a/makecat.py b/makecat.py
index 57531a2..3cb9ab7 100644
--- a/makecat.py
+++ b/makecat.py
@@ -103,7 +103,7 @@
             pass
         else:
             cats = pl.categories()
-            if not workingcat in cats:
+            if workingcat not in cats:
                 cats = pl.categories()
                 for c in cats:
                     if c in parentcats:
@@ -178,7 +178,7 @@
         elif answer == 'a':
             pagetitle = raw_input("Specify page to add:")
             page = pywikibot.Page(pywikibot.getSite(), pagetitle)
-            if not page in checked.keys():
+            if page not in checked.keys():
                 include(page)
         elif answer == 'x':
             if pl.exists():
diff --git a/rcsort.py b/rcsort.py
index f338404..799dd36 100644
--- a/rcsort.py
+++ b/rcsort.py
@@ -47,7 +47,7 @@
 for element in form:
     if element != 'newbies':
         path += '&%s=%s' % (element, form[element].value)
-if not 'limit' in form:
+if 'limit' not in form:
     path += '&limit=1000'
 
 text = mysite.getUrl(path)
diff --git a/redirect.py b/redirect.py
index f75a352..1cd7c40 100644
--- a/redirect.py
+++ b/redirect.py
@@ -772,7 +772,7 @@
             except ValueError:
 #-namespace:all Process all namespaces. Works only with the API read interface.
                 pass
-            if not ns in namespaces:
+            if ns not in namespaces:
                 namespaces.append(ns)
         elif arg.startswith('-offset:'):
             offset = int(arg[8:])
diff --git a/reflinks.py b/reflinks.py
index 23d282e..1f0519d 100644
--- a/reflinks.py
+++ b/reflinks.py
@@ -306,7 +306,7 @@
 
             params = match.group('params')
             group = self.GROUPS.match(params)
-            if not group in foundRefs:
+            if group not in foundRefs:
                 foundRefs[group] = {}
 
             groupdict = foundRefs[group]
@@ -327,7 +327,7 @@
 
                     if name == 'population':
                         pywikibot.output(content)
-                    if not name in foundRefNames:
+                    if name not in foundRefNames:
                         # first time ever we meet this name
                         if name == 'population':
                             print "in"
diff --git a/replicate_wiki.py b/replicate_wiki.py
index 17f9383..5c1d3d5 100644
--- a/replicate_wiki.py
+++ b/replicate_wiki.py
@@ -132,7 +132,7 @@
         pages = imap(lambda p: p.title(),
                      self.original.allpages('!', namespace))
         for p in pages:
-            if not p in ['MediaWiki:Sidebar', 'MediaWiki:Mainpage',
+            if p not in ['MediaWiki:Sidebar', 'MediaWiki:Mainpage',
                          'MediaWiki:Sitenotice', 'MediaWiki:MenuSidebar']:
                 try:
                     self.check_page(p)
diff --git a/spamremove.py b/spamremove.py
index d726421..0413ebe 100644
--- a/spamremove.py
+++ b/spamremove.py
@@ -85,7 +85,7 @@
         pywikibot.getall(mysite, pages)
         for p in pages:
             text = p.get()
-            if not spamSite in text:
+            if spamSite not in text:
                 continue
             # Show the title of the page we're working on.
             # Highlight the title in purple.
diff --git a/spellcheck.py b/spellcheck.py
index 7602ed1..4861caa 100644
--- a/spellcheck.py
+++ b/spellcheck.py
@@ -423,7 +423,7 @@
         knownwords[self.word] = self.word
 
     def declare_alternative(self, alt):
-        if not alt in knownwords[self.word]:
+        if alt not in knownwords[self.word]:
             knownwords[self.word].append(word)
             newwords.append(self.word)
         return self.alternatives
@@ -505,7 +505,7 @@
                     word = line[1]
                     knownwords[word] = line[2:]
                     for word2 in line[2:]:
-                        if not '_' in word2:
+                        if '_' not in word2:
                             knownwords[word2] = word2
         f.close()
     except IOError:
diff --git a/splitwarning.py b/splitwarning.py
index 6cecbec..16be9d1 100644
--- a/splitwarning.py
+++ b/splitwarning.py
@@ -32,7 +32,7 @@
             family = m.group('family')
             code = m.group('code')
             if code in pywikibot.getSite().languages():
-                if not code in files:
+                if code not in files:
                     files[code] = codecs.open(
                         pywikibot.config.datafilepath(
                             folder, 'warning-%s-%s.log' % (family, code)),
diff --git a/us-states.py b/us-states.py
index 2fe2a3c..0cdc9e2 100644
--- a/us-states.py
+++ b/us-states.py
@@ -132,7 +132,7 @@
                     if force:
                         change = 'y'
                     else:
-                        while not change in ['y', 'n']:
+                        while change not in ['y', 'n']:
                             pywikibot.output(
                                 u"Create redirect %s" %
                                 pl.title().replace("%2C",
diff --git a/wikipedia.py b/wikipedia.py
index 13ee2ab..987a724 100644
--- a/wikipedia.py
+++ b/wikipedia.py
@@ -843,7 +843,7 @@
         data = query.GetData(params, self.site(), sysop=sysop)
         if 'error' in data:
             raise RuntimeError("API query error: %s" % data)
-        if not 'pages' in data['query']:
+        if 'pages' not in data['query']:
             raise RuntimeError("API query error, no pages found: %s" % data)
         pageInfo = data['query']['pages'].values()[0]
         if data['query']['pages'].keys()[0] == "-1":
@@ -903,7 +903,7 @@
         m = self.site().redirectRegex().match(pagetext)
         if m:
             # page text matches the redirect pattern
-            if self.section() and not "#" in m.group(1):
+            if self.section() and "#" not in m.group(1):
                 redirtarget = "%s#%s" % (m.group(1), self.section())
             else:
                 redirtarget = m.group(1)
@@ -1061,7 +1061,7 @@
         m = self.site().redirectRegex().match(pagetext)
         if m:
             # page text matches the redirect pattern
-            if self.section() and not "#" in m.group(1):
+            if self.section() and "#" not in m.group(1):
                 redirtarget = "%s#%s" % (m.group(1), self.section())
             else:
                 redirtarget = m.group(1)
@@ -1620,7 +1620,7 @@
             namesofextensions = []
             for extension in extensions:
                 namesofextensions.append(extension['name'])
-            if not u'Disambiguator' in namesofextensions:
+            if u'Disambiguator' not in namesofextensions:
                 return self._isDisambig_disambiguationspage(get_Index)
             else:
                 return self._isDisambig_disambiguator(get_Index)
@@ -2087,7 +2087,7 @@
             if 'missing' in text[pageid]:
                 self._getexception = NoPage
                raise NoPage('Page %s does not exist' % self.title(asLink=True))
-            elif not 'pageid' in text[pageid]:
+            elif 'pageid' not in text[pageid]:
                 # Don't know what may happen here.
                 # We may want to have better error handling
                 raise Error("BUG> API problem.")
@@ -4973,7 +4973,7 @@
             data['query']['pages'].update(data['entities'])
         if 'error' in data:
             raise RuntimeError("API query error: %s" % data)
-        if not 'pages' in data['query']:
+        if 'pages' not in data['query']:
             raise NoPage(self.site(), unicode(self),
                          "API query error, no pages found: %s" % data)
         pageInfo = ndata['query']['pages'].values()[0]
@@ -5029,7 +5029,7 @@
         m = self.site().redirectRegex().match(pagetext)
         if m:
             # page text matches the redirect pattern
-            if self.section() and not "#" in m.group(1):
+            if self.section() and "#" not in m.group(1):
                 redirtarget = "%s#%s" % (m.group(1), self.section())
             else:
                 redirtarget = m.group(1)
@@ -5660,7 +5660,7 @@
                     if m:
##                        output(u"%s is a redirect" % page2.title(asLink=True))
                         redirectto = m.group(1)
-                        if section and not "#" in redirectto:
+                        if section and "#" not in redirectto:
                             redirectto += "#" + section
                         page2._getexception = IsRedirectPage
                         page2._redirarg = redirectto
@@ -5843,7 +5843,7 @@
##                        output(u"%s is a redirect" % page2.title(asLink=True))
                         m = self.site.redirectRegex().match(text)
                         redirectto = m.group(1)
-                        if section and not "#" in redirectto:
+                        if section and "#" not in redirectto:
                             redirectto += "#" + section
                         page2._getexception = IsRedirectPage
                         page2._redirarg = redirectto
@@ -7244,7 +7244,7 @@
         # Get username.
         # The data in anonymous mode had key 'anon'
         # if 'anon' exist, username is IP address, not to collect it right now
-        if not 'anon' in text:
+        if 'anon' not in text:
             self._isLoggedIn[index] = True
             self._userName[index] = text['name']
         else:
@@ -7857,7 +7857,7 @@
                 raise Error
             for c in result['query']['logevents']:
                 if (not namespace or c['ns'] in namespace) and \
-                   not 'actionhidden' in c.keys():
+                   'actionhidden' not in c.keys():
                     if dump:
                         # dump result only.
                         yield c
@@ -8515,7 +8515,7 @@
                 raise RuntimeError("API query warning: %s" % warning)
             if 'error' in data:
                 raise RuntimeError("API query error: %s" % data)
-            if not 'allpages' in data['query']:
+            if 'allpages' not in data['query']:
                raise RuntimeError("API query error, no pages found: %s" % data)
             count = 0
             for p in data['query']['allpages']:
@@ -8728,7 +8728,7 @@
                         break
                     for pages in data['query']['exturlusage']:
                         count += 1
-                        if not siteurl in pages['title']:
+                        if siteurl not in pages['title']:
                             # the links themselves have similar form
                             if pages['pageid'] not in cache:
                                 cache.append(pages['pageid'])
@@ -8769,7 +8769,7 @@
                         #no more page to be fetched for that link
                         break
                     for title in links:
-                        if not siteurl in title:
+                        if siteurl not in title:
                             # the links themselves have similar form
                             if title in cache:
                                 continue
@@ -8799,7 +8799,7 @@
 
         """
         s = s.replace("_", " ").strip(" ").lstrip(":")
-        if not ':' in s:
+        if ':' not in s:
             return False
         first, rest = s.split(':', 1)
         # interwiki codes are case-insensitive
@@ -9507,7 +9507,7 @@
         except KeyError:
             user = None
     key = '%s:%s:%s' % (fam, code, user)
-    if not key in _sites:
+    if key not in _sites:
         _sites[key] = Site(code=code, fam=fam, user=user)
     ret = _sites[key]
     if not ret.family.isPublic(code) and not noLogin:
diff --git a/wiktionary.py b/wiktionary.py
index 2393ca8..62b12fc 100644
--- a/wiktionary.py
+++ b/wiktionary.py
@@ -678,7 +678,7 @@
         self.meanings.setdefault(term.pos, []).append(meaning)
         # we only need each part of speech once in our list where we keep track
         # of the order
-        if not term.pos in self.posorder:
+        if term.pos not in self.posorder:
             self.posorder.append(term.pos)
 
     def getMeanings(self):
diff --git a/xmlreader.py b/xmlreader.py
index dab0687..1df969f 100644
--- a/xmlreader.py
+++ b/xmlreader.py
@@ -283,7 +283,7 @@
     def parse(self):
         """Return a generator that will yield XmlEntry objects"""
         print 'Reading XML dump...'
-        if not 'iterparse' in globals():
+        if 'iterparse' not in globals():
             pywikibot.warning(
 u'''cElementTree not found. Using slower fallback solution.
 Consider installing the python-celementtree package.''')

-- 
To view, visit https://gerrit.wikimedia.org/r/130545
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: merged
Gerrit-Change-Id: I7ca12dbc3a0ec634d8ebea18637e110109aacb68
Gerrit-PatchSet: 6
Gerrit-Project: pywikibot/compat
Gerrit-Branch: master
Gerrit-Owner: Ricordisamoa <[email protected]>
Gerrit-Reviewer: Ladsgroup <[email protected]>
Gerrit-Reviewer: Siebrand <[email protected]>
Gerrit-Reviewer: Xqt <[email protected]>
Gerrit-Reviewer: jenkins-bot <>

_______________________________________________
Pywikibot-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/pywikibot-commits
