John Vandenberg has uploaded a new change for review.

  https://gerrit.wikimedia.org/r/165453

Change subject: Docstring fixes in scripts
......................................................................

Docstring fixes in scripts

Change-Id: Ic5a25fac9592fead9a6d8b0748bf13947ef7f2c7
---
M scripts/__init__.py
M scripts/category_redirect.py
M scripts/clean_sandbox.py
M scripts/commons_link.py
M scripts/commonscat.py
M scripts/cosmetic_changes.py
M scripts/create_categories.py
M scripts/data_ingestion.py
M scripts/delete.py
M scripts/disambredir.py
M scripts/editarticle.py
M scripts/featured.py
M scripts/fixing_redirects.py
M scripts/flickrripper.py
M scripts/freebasemappingupload.py
M scripts/image.py
M scripts/imagerecat.py
M scripts/imagetransfer.py
M scripts/imageuncat.py
M scripts/isbn.py
M scripts/lonelypages.py
M scripts/misspelling.py
M scripts/movepages.py
M scripts/noreferences.py
M scripts/nowcommons.py
M scripts/pagefromfile.py
M scripts/redirect.py
M scripts/reflinks.py
M scripts/replace.py
M scripts/replicate_wiki.py
M scripts/revertbot.py
M scripts/script_wui.py
M scripts/selflink.py
M scripts/solve_disambiguation.py
M scripts/spamremove.py
M scripts/template.py
M scripts/templatecount.py
M scripts/touch.py
M scripts/transferbot.py
M scripts/unlink.py
M scripts/unusedfiles.py
M scripts/upload.py
M scripts/version.py
M scripts/weblinkchecker.py
M scripts/welcome.py
M tox.ini
46 files changed, 421 insertions(+), 200 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/pywikibot/core refs/changes/53/165453/1

diff --git a/scripts/__init__.py b/scripts/__init__.py
index c47da75..6eb2664 100644
--- a/scripts/__init__.py
+++ b/scripts/__init__.py
@@ -1 +1 @@
-# THIS DIRECTORY IS TO HOLD BOT SCRIPTS FOR THE NEW FRAMEWORK
+""" THIS DIRECTORY IS TO HOLD BOT SCRIPTS FOR THE NEW FRAMEWORK. """
diff --git a/scripts/category_redirect.py b/scripts/category_redirect.py
index 1cb5acb..382bab7 100755
--- a/scripts/category_redirect.py
+++ b/scripts/category_redirect.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-"""This bot will move pages out of redirected categories
+"""This bot will move pages out of redirected categories.
 
 Usage: category_redirect.py [options]
 
@@ -37,7 +37,10 @@
 
 class CategoryRedirectBot(object):
 
+    """Page category update bot."""
+
     def __init__(self):
+        """Constructor."""
         self.cooldown = 7  # days
         self.site = pywikibot.Site()
         self.catprefix = self.site.namespace(14) + ":"
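
The three additions here recur in nearly every file below: a one-line
class docstring set off by blank lines, plus a short imperative
docstring on each previously bare method. The target shape, sketched
with made-up names:

    import pywikibot


    class ExampleBot(object):

        """One-line summary in the imperative mood, ending with a period."""

        def __init__(self):
            """Constructor."""
            self.site = pywikibot.Site()

        def run(self):
            """Run bot."""
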
diff --git a/scripts/clean_sandbox.py b/scripts/clean_sandbox.py
index 47a5edb..8c6ca27 100755
--- a/scripts/clean_sandbox.py
+++ b/scripts/clean_sandbox.py
@@ -1,8 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 """
-This bot cleans a (user) sandbox by replacing the current contents with
-predefined text.
+This bot resets a (user) sandbox with predefined text.
 
 This script understands the following command-line arguments:
 
@@ -137,6 +136,9 @@
 
 
 class SandboxBot(Bot):
+
+    """Sandbox reset bot."""
+
     availableOptions = {
         'hours': 1,
         'no_repeat': True,
@@ -149,6 +151,7 @@
     }
 
     def __init__(self, **kwargs):
+        """Constructor."""
         super(SandboxBot, self).__init__(**kwargs)
         if self.getOption('delay') is None:
             d = min(15, max(5, int(self.getOption('hours') * 60)))
@@ -177,6 +180,7 @@
             sys.exit(0)
 
     def run(self):
+        """Run bot."""
         self.site.login()
         while True:
             wait = False
diff --git a/scripts/commons_link.py b/scripts/commons_link.py
index 8ba3204..7436ffb 100644
--- a/scripts/commons_link.py
+++ b/scripts/commons_link.py
@@ -43,6 +43,9 @@
 
 
 class CommonsLinkBot(Bot):
+
+    """Commons linking bot."""
+
     def __init__(self, generator, **kwargs):
         self.availableOptions.update({
             'action': None,
diff --git a/scripts/commonscat.py b/scripts/commonscat.py
index 96dedfb..0c9ec78 100755
--- a/scripts/commonscat.py
+++ b/scripts/commonscat.py
@@ -2,6 +2,7 @@
 # -*- coding: utf-8  -*-
 """
 With this tool you can add the template {{commonscat}} to categories.
+
 The tool works by following the interwiki links. If the template is present on
 another langauge page, the bot will use it.
 
@@ -236,6 +237,8 @@
 
 class CommonscatBot(Bot):
 
+    """Commons categorisation bot."""
+
     def __init__(self, generator, always, summary=None):
         super(CommonscatBot, self).__init__(always=always)
         self.generator = generator
@@ -262,6 +265,7 @@
     @classmethod
     def getCommonscatTemplate(self, code=None):
         """Get the template name of a site. Expects the site code.
+
         Return as tuple containing the primary template and it's alternatives
 
         """
@@ -271,7 +275,7 @@
             return commonscatTemplates[u'_default']
 
     def skipPage(self, page):
-        """Do we want to skip this page?"""
+        """Determine if the page should be skipped."""
         if page.site.code in ignoreTemplates:
             templatesInThePage = page.templates()
             templatesWithParams = page.templatesWithParams()
@@ -288,7 +292,10 @@
         return False
 
     def addCommonscat(self, page):
-        """Take a page. Go to all the interwiki page looking for a commonscat
+        """
+        Add CommonsCat template to page.
+
+        Take a page. Go to all the interwiki page looking for a commonscat
         template. When all the interwiki's links are checked and a proper
         category is found add it to the page.
 
@@ -418,7 +425,10 @@
         return u''
 
     def getCommonscatLink(self, wikipediaPage=None):
-        """Go through the page and return a tuple of (<templatename>, 
<target>)"""
+        """Find CommonsCat template on page.
+
+        @rtype: tuple of (<templatename>, <target>)
+        """
         primaryCommonscat, commonscatAlternatives = self.getCommonscatTemplate(
             wikipediaPage.site.code)
         commonscatTemplate = u''
@@ -444,7 +454,8 @@
         return None
 
     def checkCommonscatLink(self, name=""):
-        """ This function will return the name of a valid commons category
+        """ Return the name of a valid commons category.
+
         If the page is a redirect this function tries to follow it.
         If the page doesnt exists the function will return an empty string
 
diff --git a/scripts/cosmetic_changes.py b/scripts/cosmetic_changes.py
index a79da87..d3e32c0 100755
--- a/scripts/cosmetic_changes.py
+++ b/scripts/cosmetic_changes.py
@@ -1,9 +1,9 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-This module can do slight modifications to a wiki page source code such that
-the code looks cleaner. The changes are not supposed to change the look of the
-rendered wiki page.
+This module can do slight modifications to tidy a wiki page's source code.
+
+The changes are not supposed to change the look of the rendered wiki page.
 
 The following parameters are supported:
 
@@ -157,6 +157,8 @@
 
 class CosmeticChangesToolkit:
 
+    """Cosmetic changes toolkit."""
+
     def __init__(self, site, debug=False, redirect=False, namespace=None,
                  pageTitle=None, ignore=CANCEL_ALL):
         self.site = site
@@ -241,6 +243,7 @@
     def fixSelfInterwiki(self, text):
         """
         Interwiki links to the site itself are displayed like local links.
+
         Remove their language code prefix.
         """
         if not self.talkpage and pywikibot.calledModuleName() != 'interwiki':
@@ -251,6 +254,8 @@
 
     def standardizePageFooter(self, text):
         """
+        Standardize page footer.
+
         Makes sure that interwiki links, categories and star templates are
         put to the correct position and into the right order. This combines the
         old instances standardizeInterwiki and standardizeCategories
@@ -359,7 +364,7 @@
         return text
 
     def translateAndCapitalizeNamespaces(self, text):
-        """Makes sure that localized namespace names are used."""
+        """Use localized namespace names."""
         # arz uses english stylish codes
         if self.site.sitename() == 'wikipedia:arz':
             return text
@@ -404,7 +409,7 @@
         return text
 
     def translateMagicWords(self, text):
-        """Makes sure that localized namespace names are used."""
+        """Use localized magic words."""
         # not wanted at ru
         # arz uses english stylish codes
         if self.site.code not in ['arz', 'ru']:
@@ -587,10 +592,13 @@
 
     def removeNonBreakingSpaceBeforePercent(self, text):
         """
+        Remove a non-breaking space before a percent sign.
+
         Newer MediaWiki versions automatically place a non-breaking space in
         front of a percent sign, so it is no longer required to place it
         manually.
 
+        FIXME: which version should this be run on?
         """
         text = textlib.replaceExcept(text, r'(\d)&nbsp;%', r'\1 %',
                                      ['timeline'])
@@ -598,8 +606,8 @@
 
     def cleanUpSectionHeaders(self, text):
         """
-        For better readability of section header source code, puts a space
-        between the equal signs and the title.
+        Add a space between the equal signs and the section title.
+
         Example: ==Section title== becomes == Section title ==
 
         NOTE: This space is recommended in the syntax help on the English and
@@ -614,8 +622,7 @@
 
     def putSpacesInLists(self, text):
         """
-        For better readability of bullet list and enumeration wiki source code,
-        puts a space between the * or # and the text.
+        Add a space between the * or # and the text.
 
         NOTE: This space is recommended in the syntax help on the English,
        German, and French Wikipedia. It might be that it is not wanted on other
@@ -884,6 +891,9 @@
 
 
 class CosmeticChangesBot(Bot):
+
+    """Cosmetic changes bot."""
+
     def __init__(self, generator, **kwargs):
         self.availableOptions.update({
             'async': False,
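
cleanUpSectionHeaders, per its new summary line, only normalises the
spacing around the equal signs. A rough standalone equivalent of that
one transformation (the real method uses textlib.replaceExcept so that
exception regions are skipped):

    import re

    def clean_up_section_headers(text):
        """Rewrite '==Section title==' as '== Section title ==' (sketch)."""
        return re.sub(r'(?m)^(={1,7}) *(.*?) *(={1,7}) *$', r'\1 \2 \3', text)

    print(clean_up_section_headers(u'==Section title=='))
    # == Section title ==
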
diff --git a/scripts/create_categories.py b/scripts/create_categories.py
index df750a8..c7ebab5 100755
--- a/scripts/create_categories.py
+++ b/scripts/create_categories.py
@@ -37,6 +37,9 @@
 
 
 class CreateCategoriesBot(Bot):
+
+    """Category creator bot."""
+
     def __init__(self, generator, parent, basename, **kwargs):
         super(CreateCategoriesBot, self).__init__(**kwargs)
         self.generator = generator
diff --git a/scripts/data_ingestion.py b/scripts/data_ingestion.py
index 70856eb..72e22f5 100755
--- a/scripts/data_ingestion.py
+++ b/scripts/data_ingestion.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
-"""A generic bot to do data ingestion (batch uploading) to Commons"""
+"""A generic bot to do data ingestion (batch uploading) to Commons."""
 #
 # (C) Pywikibot team, 2013
 #
@@ -42,6 +42,7 @@
     """
 
     def __init__(self, URL, metadata):
+        """Constructor."""
         self.URL = URL
         self.metadata = metadata
         self.metadata["_url"] = URL
@@ -66,7 +67,9 @@
     def findDuplicateImages(self,
                             site=pywikibot.Site(u'commons', u'commons')):
         """
-        Takes the photo, calculates the SHA1 hash and asks the MediaWiki api
+        Find duplicates of the photo.
+
+        Calculates the SHA1 hash and asks the MediaWiki api
         for a list of duplicates.
 
         TODO: Add exception handling, fix site thing
@@ -77,8 +80,12 @@
 
     def getTitle(self, fmt):
         """
-        Given a format string with %(name)s entries, returns the string
-        formatted with metadata
+        Populate format string with %(name)s entries using metadata.
+
+        @param fmt: format string
+        @type fmt: unicode
+        @return: formatted string
+        @rtype: unicode
         """
         return fmt % self.metadata
 
@@ -102,6 +109,7 @@
 
 
 def CSVReader(fileobj, urlcolumn, *args, **kwargs):
+    """CSV reader."""
     import csv
     reader = csv.DictReader(fileobj, *args, **kwargs)
 
@@ -110,6 +118,9 @@
 
 
 class DataIngestionBot:
+
+    """Data ingestion bot."""
+
     def __init__(self, reader, titlefmt, pagefmt,
                  site=pywikibot.Site(u'commons', u'commons')):
         self.reader = reader
diff --git a/scripts/delete.py b/scripts/delete.py
index f686281..1c3abdc 100644
--- a/scripts/delete.py
+++ b/scripts/delete.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 """
 This script can be used to delete and undelete pages en masse.
+
 Of course, you will need an admin account on the relevant wiki.
 
 These command line parameters can be used to specify which pages to work on:
@@ -43,6 +44,7 @@
 
 
 class DeletionRobot(Bot):
+
     """ This robot allows deletion of pages en masse. """
 
     def __init__(self, generator, summary, **kwargs):
@@ -63,9 +65,10 @@
         self.summary = summary
 
     def run(self):
-        """ Start the robot's action:
-        Loop through everything in the page generator and delete it.
+        """
+        Run bot.
 
+        Loop through everything in the page generator and delete it.
         """
         for page in self.generator:
             self.current_page = page
diff --git a/scripts/disambredir.py b/scripts/disambredir.py
index de464c4..e9393d7 100644
--- a/scripts/disambredir.py
+++ b/scripts/disambredir.py
@@ -1,8 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-Goes through the disambiguation pages, checks their links, and asks for
-each link that goes to a redirect page whether it should be replaced.
+User assisted updating redirect links on disambiguation pages.
 
 Usage:
     python disambredir.py [start]
diff --git a/scripts/editarticle.py b/scripts/editarticle.py
index fc111c6..8f98fe8 100755
--- a/scripts/editarticle.py
+++ b/scripts/editarticle.py
@@ -28,6 +28,9 @@
 
 
 class ArticleEditor(object):
+
+    """Edit a wiki page."""
+
     # join lines if line starts with this ones
     # TODO: No apparent usage
     # joinchars = string.letters + '[]' + string.digits
@@ -58,7 +61,7 @@
             self.options.page = args[0]
 
     def setpage(self):
-        """Sets page and page title."""
+        """Set page and page title."""
         site = pywikibot.Site()
         pageTitle = self.options.page or pywikibot.input(u"Page to edit:")
         self.page = pywikibot.Page(pywikibot.Link(pageTitle, site))
diff --git a/scripts/featured.py b/scripts/featured.py
index 139543c..7f15f31 100644
--- a/scripts/featured.py
+++ b/scripts/featured.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 """
+Manage featured/good article/list status template.
+
 This script understands various command-line arguments:
 
  Task commands:
@@ -205,6 +207,9 @@
 
 
 class FeaturedBot(pywikibot.Bot):
+
+    """Featured article bot."""
+
     # Bot configuration.
     # Only the keys of the dict can be passed as init options
     # The values are the default values
@@ -247,7 +252,7 @@
             self.tasks = ['featured']
 
     def itersites(self, task):
-        """generator for site codes to be processed."""
+        """Generator for site codes to be processed."""
         def _generator():
             if task == 'good':
                 item_no = good_name['wikidata'][1]
@@ -501,7 +506,6 @@
         remember the page in the cache dict.
 
         """
-
         tosite = self.site
         if fromsite.code not in self.cache:
             self.cache[fromsite.code] = {}
@@ -533,7 +537,6 @@
 
     def add_template(self, source, dest, task, fromsite):
         """Place or remove the Link_GA/FA template on/from a page."""
-
         def compile_link(site, templates):
             """compile one link template list."""
             findtemplate = '(%s)' % '|'.join(templates)
diff --git a/scripts/fixing_redirects.py b/scripts/fixing_redirects.py
index cede70d..ba64220 100644
--- a/scripts/fixing_redirects.py
+++ b/scripts/fixing_redirects.py
@@ -1,8 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-This script has the intention to correct all redirect
-links in featured pages or only one page of each wiki.
+Correct all redirect links in featured pages or only one page of each wiki.
 
 Can be using with:
 &params;
diff --git a/scripts/flickrripper.py b/scripts/flickrripper.py
index c093f1e..d635f8d 100644
--- a/scripts/flickrripper.py
+++ b/scripts/flickrripper.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-Tool to copy a flickr stream to Commons
+Tool to copy a flickr stream to Commons.
 
 # Get a set to work on (start with just a username).
 # * Make it possible to delimit the set (from/to)
@@ -81,7 +81,7 @@
 
 def getPhoto(flickr=None, photo_id=''):
     """
-    Get the photo info and the photo sizes so we can use these later on
+    Get the photo info and the photo sizes so we can use these later on.
 
     TODO: Add exception handling
 
@@ -100,7 +100,7 @@
 
 def isAllowedLicense(photoInfo=None):
     """
-    Check if the image contains the right license
+    Check if the image contains the right license.
 
     TODO: Maybe add more licenses
     """
@@ -133,7 +133,9 @@
 
 def findDuplicateImages(photo=None,
                         site=pywikibot.Site(u'commons', u'commons')):
-    """ Take the photo, calculate the SHA1 hash and ask the MediaWiki api
+    """ Find duplicate images.
+
+    Take the photo, calculate the SHA1 hash and ask the MediaWiki api
     for a list of duplicates.
 
     TODO: Add exception handling, fix site thing
@@ -155,7 +157,7 @@
 
 def getFlinfoDescription(photo_id=0):
     """
-    Get the description from http://wikipedia.ramselehof.de/flinfo.php
+    Get the description from http://wikipedia.ramselehof.de/flinfo.php.
 
     TODO: Add exception handling, try a couple of times
     """
@@ -168,8 +170,9 @@
 
 
 def getFilename(photoInfo=None, site=None, project=u'Flickr'):
-    """ Build a good filename for the upload based on the username and the
-    title. Prevents naming collisions.
+    """ Build a good filename for the upload based on the username and title.
+
+    Prevents naming collisions.
 
     """
     if not site:
@@ -209,8 +212,9 @@
 
 
 def cleanUpTitle(title):
-    """ Clean up the title of a potential MediaWiki page. Otherwise the title 
of
-    the page might not be allowed by the software.
+    """ Clean up the title of a potential MediaWiki page.
+
+    Otherwise the title of the page might not be allowed by the software.
 
     """
     title = title.strip()
@@ -236,8 +240,9 @@
 
 def buildDescription(flinfoDescription=u'', flickrreview=False, reviewer=u'',
                      override=u'', addCategory=u'', removeCategories=False):
-    """ Build the final description for the image. The description is based on
-    the info from flickrinfo and improved.
+    """ Build the final description for the image.
+
+    The description is based on the info from flickrinfo and improved.
 
     """
     description = u'== {{int:filedesc}} ==\n%s' % flinfoDescription
@@ -324,6 +329,7 @@
     """ The user dialog. """
 
     def __init__(self, photoDescription, photo, filename):
+        """Constructor."""
         self.root = Tk()
         # "%dx%d%+d%+d" % (width, height, xoffset, yoffset)
         self.root.geometry("%ix%i+10-10" % (config.tkhorsize, 
config.tkvertsize))
@@ -396,9 +402,10 @@
         self.root.destroy()
 
     def run(self):
-        """ Activate the dialog and return the new name and if the image is
-        skipped.
+        """ Activate the dialog.
 
+        @return: new description, name, and if the image is skipped
+        @rtype: tuple of (unicode, unicode, bool)
         """
         self.root.mainloop()
         return self.photoDescription, self.filename, self.skip
@@ -504,7 +511,7 @@
 
 def usage():
     """
-    Print usage information
+    Print usage information.
 
     TODO : Need more.
     """
diff --git a/scripts/freebasemappingupload.py b/scripts/freebasemappingupload.py
index 1d45947..7b7c33c 100644
--- a/scripts/freebasemappingupload.py
+++ b/scripts/freebasemappingupload.py
@@ -1,7 +1,8 @@
 #!/usr/bin/python
 # -*- coding: utf-8    -*-
 """
-Script to upload the mappings of Freebase to Wikidata
+Script to upload the mappings of Freebase to Wikidata.
+
 Can be easily adapted to upload other String identifiers as well
 
 This bot needs the dump from
@@ -31,6 +32,8 @@
 
 class FreebaseMapperRobot:
 
+    """Freebase Mapping bot."""
+
     def __init__(self, filename):
         self.repo = pywikibot.Site('wikidata', 'wikidata').data_repository()
         self.filename = filename
diff --git a/scripts/image.py b/scripts/image.py
index 0fb191d..2b7d6b9 100644
--- a/scripts/image.py
+++ b/scripts/image.py
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-This script can be used to change one image to another or remove an image
-entirely.
+This script can be used to change one image to another or remove an image.
 
 Syntax: python image.py image_name [new_image_name]
 
@@ -49,10 +48,7 @@
 
 class ImageRobot(Bot):
 
-    """
-    This bot will load all pages yielded by a file links image page generator and
-    replace or remove all occurences of the old image.
-    """
+    """This bot will replace or remove all occurences of an old image."""
 
     # Summary messages for replacing images
     msg_replace = {
diff --git a/scripts/imagerecat.py b/scripts/imagerecat.py
index a08ab73..13094b2 100644
--- a/scripts/imagerecat.py
+++ b/scripts/imagerecat.py
@@ -74,8 +74,9 @@
 
 
 def categorizeImages(generator, onlyFilter, onlyUncat):
-    """ Loop over all images in generator and try to categorize them. Get
-    category suggestions from CommonSense.
+    """ Loop over all images in generator and try to categorize them.
+
+    Get category suggestions from CommonSense.
 
     """
     for page in generator:
@@ -112,8 +113,9 @@
 
 
 def getCommonshelperCats(imagepage):
-    """ Get category suggestions from CommonSense. Parse them and return a list
-    of suggestions.
+    """ Get category suggestions from CommonSense.
+
+    @rtype: list of unicode
 
     """
     commonshelperCats = []
@@ -212,8 +214,9 @@
 
 def getOpenStreetMap(latitude, longitude):
     """
-    Get the result from https://nominatim.openstreetmap.org/reverse
-    and put it in a list of tuples to play around with
+    Get the result from https://nominatim.openstreetmap.org/reverse .
+
+    @rtype: list of tuples
     """
     result = []
     gotInfo = False
@@ -246,7 +249,7 @@
 
 
 def getCategoryByName(name, parent=u'', grandparent=u''):
-
+    """Get category by name."""
     if not parent == u'':
         workname = name.strip() + u',_' + parent.strip()
         workcat = pywikibot.Category(pywikibot.Site(u'commons', u'commons'), workname)
@@ -336,6 +339,7 @@
 
 def filterCountries(categories):
     """ Try to filter out ...by country categories.
+
     First make a list of any ...by country categories and try to find some
     countries. If a by country category has a subcategoy containing one of the
     countries found, add it. The ...by country categories remain in the set and
diff --git a/scripts/imagetransfer.py b/scripts/imagetransfer.py
index 84eea97..74a0b71 100644
--- a/scripts/imagetransfer.py
+++ b/scripts/imagetransfer.py
@@ -151,6 +151,9 @@
 
 
 class ImageTransferBot:
+
+    """Image transfer bot."""
+
     def __init__(self, generator, targetSite=None, interwiki=False,
                  keep_name=False, ignore_warning=False):
         self.generator = generator
@@ -160,11 +163,10 @@
         self.ignore_warning = ignore_warning
 
     def transferImage(self, sourceImagePage):
-        """Get a wikilink to an image, download it and its description,
-           and upload it to another wikipedia.
-           Return the filename which was used to upload the image
-           This function is used by imagetransfer.py and by copy_table.py
+        """
+        Download image and its description, and upload it to another site.
 
+        @return: the filename which was used to upload the image
         """
         sourceSite = sourceImagePage.site
         url = sourceImagePage.fileUrl().encode('utf-8')
diff --git a/scripts/imageuncat.py b/scripts/imageuncat.py
index e055f5b..1f90d80 100755
--- a/scripts/imageuncat.py
+++ b/scripts/imageuncat.py
@@ -2,6 +2,7 @@
 # -*- coding: utf-8 -*-
 """
 Program to add uncat template to images without categories at commons.
+
 See imagerecat.py (still working on that one) to add these images to categories.
 
 """
@@ -1235,12 +1236,11 @@
 
 
 def uploadedYesterday(site):
-    '''
+    """
     Return a pagegenerator containing all the pictures uploaded yesterday.
+
     Should probably copied to somewhere else
-
-    '''
-
+    """
     today = pywikibot.Timestamp.utcnow()
     yesterday = today + timedelta(days=-1)
 
@@ -1249,11 +1249,12 @@
 
 
 def recentChanges(site=None, delay=0, block=70):
-    '''
+    """
     Return a pagegenerator containing all the images edited in a certain timespan.
+
     The delay is the amount of minutes to wait and the block is the timespan to return images in.
     Should probably be copied to somewhere else
-    '''
+    """
     rcstart = site.getcurrenttime() + timedelta(minutes=-(delay + block))
     rcend = site.getcurrenttime() + timedelta(minutes=-delay)
 
@@ -1267,13 +1268,13 @@
 
 
 def isUncat(page):
-    '''
-    Do we want to skip this page?
+    """
+    Determine whether the page should be skipped.
 
     If we found a category which is not in the ignore list it means
     that the page is categorized so skip the page.
     If we found a template which is in the ignore list, skip the page.
-    '''
+    """
     pywikibot.output(u'Working on ' + page.title())
 
     for category in page.categories():
@@ -1298,9 +1299,12 @@
 
 
 def addUncat(page):
-    '''
-    Add the uncat template to the page
-    '''
+    """
+    Add the uncat template to the page.
+
+    @param page: Page to be modified
+    @rtype: Page
+    """
     newtext = page.get() + puttext
     pywikibot.showDiff(page.get(), newtext)
     try:
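
The recentChanges docstring above describes a sliding window: skip the
most recent 'delay' minutes, then take the 'block' minutes before that.
In plain datetime arithmetic:

    from datetime import datetime, timedelta

    delay, block = 0, 70             # minutes, defaults from the signature
    now = datetime.utcnow()          # stand-in for site.getcurrenttime()
    rcstart = now - timedelta(minutes=delay + block)
    rcend = now - timedelta(minutes=delay)
    # images edited between rcstart and rcend fall inside the window
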
diff --git a/scripts/isbn.py b/scripts/isbn.py
index d759003..0e144a8 100755
--- a/scripts/isbn.py
+++ b/scripts/isbn.py
@@ -2,8 +2,7 @@
 # -*- coding: utf-8  -*-
 
 """
-This script goes over multiple pages of the home wiki, and reports invalid
-ISBN numbers.
+This script reports and fixes invalid ISBN numbers.
 
 Additionally, it can convert all ISBN-10 codes to the ISBN-13 format, and
 correct the ISBN format by placing hyphens.
@@ -1160,16 +1159,16 @@
 
 
 class InvalidIsbnException(pywikibot.Error):
+
     """Invalid ISBN."""
 
 
 class ISBN:
-    """
-    Abstract superclass
-    """
+
+    """Abstract superclass."""
 
     def format(self):
-        """Puts hyphens into this ISBN number."""
+        """Put hyphens into this ISBN number."""
         result = ''
         rest = ''
         for digit in self.digits():
@@ -1209,6 +1208,9 @@
 
 
 class ISBN13(ISBN):
+
+    """ISBN 13."""
+
     def __init__(self, code, checksumMissing=False):
         self.code = code
         if checksumMissing:
@@ -1219,7 +1221,7 @@
         return ['978', '979']
 
     def digits(self):
-        """Returns a list of the digits in the ISBN code."""
+        """Return a list of the digits in the ISBN code."""
         result = []
         for c in self.code:
             if c.isdigit():
@@ -1248,6 +1250,9 @@
 
 
 class ISBN10(ISBN):
+
+    """ISBN 10."""
+
     def __init__(self, code):
         self.code = code
         self.checkValidity()
@@ -1256,7 +1261,7 @@
         return []
 
     def digits(self):
-        """Returns a list of the digits and Xs in the ISBN code."""
+        """Return a list of the digits and Xs in the ISBN code."""
         result = []
         for c in self.code:
             if c.isdigit() or c in 'Xx':
@@ -1267,10 +1272,7 @@
         return result
 
     def checkChecksum(self):
-        """
-        Raises an InvalidIsbnException if the checksum shows that the
-        ISBN is incorrect.
-        """
+        """Raise an InvalidIsbnException if the ISBN checksum is incorrect."""
         # See https://en.wikipedia.org/wiki/ISBN#Check_digit_in_ISBN_10
         sum = 0
         for i in range(0, 9):
@@ -1297,8 +1299,9 @@
 
     def toISBN13(self):
         """
-        Creates a 13-digit ISBN from this 10-digit ISBN by prefixing the GS1
-        prefix '978' and recalculating the checksum.
+        Create a 13-digit ISBN from this 10-digit ISBN.
+
+        Adds the GS1 prefix '978' and recalculates the checksum.
         The hyphenation structure is taken from the format of the original
         ISBN number.
         """
@@ -1317,6 +1320,7 @@
 
 
 def getIsbn(code):
+    """Return an ISBN object for the code."""
     try:
         i = ISBN13(code)
     except InvalidIsbnException as e13:
@@ -1341,6 +1345,7 @@
 
 
 def hyphenateIsbnNumbers(text):
+    """Helper function to hyphenate an ISBN."""
     isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[\dXx])')
     text = isbnR.sub(_hyphenateIsbnNumber, text)
     return text
@@ -1359,6 +1364,7 @@
 
 
 def convertIsbn10toIsbn13(text):
+    """Helper function to convert ISBN 10 to ISBN 13."""
     isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[Xx]?)')
     text = isbnR.sub(_isbn10toIsbn13, text)
     return text
@@ -1366,6 +1372,8 @@
 
 class IsbnBot(Bot):
 
+    """ISBN bot."""
+
     def __init__(self, generator, **kwargs):
         self.availableOptions.update({
             'to13': False,
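
For reference, the rule behind checkChecksum's new one-liner: weight
the first nine digits 1 through 9, and the weighted sum mod 11 must
equal the check digit, with X standing for 10. A standalone sketch:

    def is_valid_isbn10(code):
        """Return True if the ISBN-10 check digit is correct (sketch)."""
        digits = [10 if c in 'Xx' else int(c)
                  for c in code if c.isdigit() or c in 'Xx']
        if len(digits) != 10:
            return False
        return sum((i + 1) * d
                   for i, d in enumerate(digits[:9])) % 11 == digits[9]

    print(is_valid_isbn10('0-306-40615-2'))  # True
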
diff --git a/scripts/lonelypages.py b/scripts/lonelypages.py
index a33fbe7..f7b6885 100644
--- a/scripts/lonelypages.py
+++ b/scripts/lonelypages.py
@@ -1,9 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 """
-This is a script written to add the template "orphan" to the pages that aren't
-linked by other pages. It can give some strange Errors sometime, I hope that
-all of them are fixed in this version.
+This script adds the template "orphan" to pages not linked by other pages.
 
 These command line parameters can be used to specify which pages to work on:
 
@@ -76,6 +74,9 @@
 
 
 class LonelyPagesBot(Bot):
+
+    """Orphan page tagging bot."""
+
     def __init__(self, generator, **kwargs):
         self.availableOptions.update({
             'enablePage': None,    # Check if someone set an enablePage or not
diff --git a/scripts/misspelling.py b/scripts/misspelling.py
index 1dbe005..104a9a1 100644
--- a/scripts/misspelling.py
+++ b/scripts/misspelling.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8  -*-
 """
-This script works similar to solve_disambiguation.py. It is supposed to fix
-links that contain common spelling mistakes. This is only possible on wikis
-that have a template for these misspellings.
+This script fixes links that contain common spelling mistakes.
+
+This is only possible on wikis that have a template for these misspellings.
 
 Command line options:
 
@@ -45,6 +45,8 @@
 
 class MisspellingRobot(DisambiguationRobot):
 
+    """Spelling bot."""
+
     misspellingTemplate = {
         'da': None,                     # uses simple redirects
         'de': u'Falschschreibung',
diff --git a/scripts/movepages.py b/scripts/movepages.py
index 90ae739..809bda5 100644
--- a/scripts/movepages.py
+++ b/scripts/movepages.py
@@ -53,6 +53,9 @@
 
 
 class MovePagesBot(Bot):
+
+    """Page move bot."""
+
     def __init__(self, generator, **kwargs):
         self.availableOptions.update({
             'prefix': None,
diff --git a/scripts/noreferences.py b/scripts/noreferences.py
index e923314..685c8ef 100755
--- a/scripts/noreferences.py
+++ b/scripts/noreferences.py
@@ -1,8 +1,9 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
-
 """
-This script goes over multiple pages, searches for pages where <references />
+This script adds a missing references section to pages.
+
+It goes over multiple pages, searches for pages where <references />
 is missing although a <ref> tag is present, and in that case adds a new
 references section.
 
@@ -426,12 +427,15 @@
 
     """
     Generator which will yield Pages that might lack a references tag.
+
     These pages will be retrieved from a local XML dump file
     (pages-articles or pages-meta-current).
     """
 
     def __init__(self, xmlFilename):
         """
+        Constructor.
+
         Arguments:
             * xmlFilename  - The dump's path, either absolute or relative
         """
@@ -452,7 +456,10 @@
 
 class NoReferencesBot(Bot):
 
+    """References section bot."""
+
     def __init__(self, generator, **kwargs):
+        """Constructor."""
         self.availableOptions.update({
             'verbose': True,
         })
@@ -478,7 +485,7 @@
             self.referencesText = u'<references />'
 
     def lacksReferences(self, text):
-        """Checks whether or not the page is lacking a references tag."""
+        """Check whether or not the page is lacking a references tag."""
         oldTextCleaned = textlib.removeDisabledParts(text)
         if self.referencesR.search(oldTextCleaned) or \
            self.referencesTagR.search(oldTextCleaned):
@@ -503,8 +510,9 @@
 
     def addReferences(self, oldText):
         """
-        Tries to add a references tag into an existing section where it fits
-        into. If there is no such section, creates a new section containing
+        Add a references tag into an existing section where it fits.
+
+        If there is no such section, creates a new section containing
         the references tag.
         * Returns : The modified pagetext
 
diff --git a/scripts/nowcommons.py b/scripts/nowcommons.py
index 939f481..a913891 100644
--- a/scripts/nowcommons.py
+++ b/scripts/nowcommons.py
@@ -1,8 +1,9 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-Script to delete files that are also present on Wikimedia Commons on a local
-wiki. Do not run this script on Wikimedia Commons itself. It works based on
+Script to delete files that are also present on Wikimedia Commons.
+
+Do not run this script on Wikimedia Commons itself. It works based on
 a given array of templates defined below.
 
 Files are downloaded and compared. If the files match, it can be deleted on
@@ -182,6 +183,9 @@
 
 
 class NowCommonsDeleteBot(Bot):
+
+    """Bot to delete migrated files."""
+
     def __init__(self, **kwargs):
         self.availableOptions.update({
             'replace': False,
diff --git a/scripts/pagefromfile.py b/scripts/pagefromfile.py
index 2d268bc..8f33d46 100644
--- a/scripts/pagefromfile.py
+++ b/scripts/pagefromfile.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
+Creates wiki pages using a file as input.
+
 This bot takes its input from a file that contains a number of
 pages to be put on the wiki. The pages should all have the same
 begin and end text (which may not overlap).
@@ -70,10 +72,7 @@
 
 class PageFromFileRobot(Bot):
 
-    """
-    Responsible for writing pages to the wiki, with the titles and contents
-    given by a PageFromFileReader.
-    """
+    """Write pages to the wiki, with data given by a PageFromFileReader."""
 
     def __init__(self, reader, **kwargs):
         self.availableOptions.update({
@@ -170,11 +169,11 @@
     def __init__(self, filename, pageStartMarker, pageEndMarker,
                  titleStartMarker, titleEndMarker, include, notitle):
         """Constructor.
+
         Check if self.file name exists. If not, ask for a new filename.
         User can quit.
 
         """
-
         self.filename = filename
         self.pageStartMarker = pageStartMarker
         self.pageEndMarker = pageEndMarker
diff --git a/scripts/redirect.py b/scripts/redirect.py
index 8790a68..79a69d2 100755
--- a/scripts/redirect.py
+++ b/scripts/redirect.py
@@ -1,9 +1,10 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-
 """
-Script to resolve double redirects, and to delete broken redirects. Requires
-access to MediaWiki's maintenance pages or to a XML dump file. Delete
-function requires adminship.
+Script to resolve double redirects, and to delete broken redirects.
+
+Requires access to MediaWiki's maintenance pages or to an XML dump file.
+Delete function requires adminship.
 
 Syntax:
 
@@ -76,6 +77,9 @@
 
 
 class RedirectGenerator:
+
+    """Redirect generator."""
+
     def __init__(self, xmlFilename=None, namespaces=[], offset=-1,
                  use_move_log=False, use_api=False, start=None, until=None,
                  number=None, step=None):
@@ -94,6 +98,8 @@
 
     def get_redirects_from_dump(self, alsoGetPageTitles=False):
         """
+        Extract redirects from dump.
+
         Load a local XML dump file, look at all pages which have the
         redirect flag set, and find out where they're pointing at. Return
         a dictionary where the redirect names are the keys and the redirect
@@ -180,10 +186,7 @@
                 yield p
 
     def _next_redirect_group(self):
-        """
-        Return a generator that retrieves pageids from the API 500 at a time
-        and yields them as a list
-        """
+        """Generator that yields batches of 500 redirects as a list."""
         apiQ = []
         for page in self.get_redirect_pages_via_api():
             apiQ.append(str(page._pageid))
@@ -195,7 +198,8 @@
 
     def get_redirects_via_api(self, maxlen=8):
         """
-        Return a generator that yields tuples of data about redirect Pages:
+        Return a generator that yields tuples of data about redirect Pages.
+
             0 - page title of a redirect page
             1 - type of redirect:
                          0 - broken redirect, target page title missing
@@ -331,7 +335,6 @@
     def get_moved_pages_redirects(self):
         """Generate redirects to recently-moved pages."""
         # this will run forever, until user interrupts it
-
         if self.offset <= 0:
             self.offset = 1
         start = (datetime.datetime.utcnow() -
@@ -367,6 +370,9 @@
 
 
 class RedirectRobot(Bot):
+
+    """Redirect bot."""
+
     def __init__(self, action, generator, **kwargs):
         self.availableOptions.update({
             'number': None,
@@ -380,10 +386,12 @@
         self._valid_template = None
 
     def has_valid_template(self, twtitle):
-        """Check whether a template from translatewiki.net does exist on real
-        wiki. We assume we are always working on self.site
+        """
+        Check whether a template from translatewiki.net exists on the wiki.
 
-        @param twtitle - a sting which is the i18n key
+        We assume we are always working on self.site
+
+        @param twtitle - a string which is the i18n key
 
         """
         if self._valid_template is None:
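
_next_redirect_group's tightened docstring describes ordinary batching
of a generator; the shape is:

    def batched(iterable, size=500):
        """Yield lists of at most size items from iterable (sketch)."""
        batch = []
        for item in iterable:
            batch.append(item)
            if len(batch) >= size:
                yield batch
                batch = []
        if batch:
            yield batch

    # e.g. one API request per yielded list of page ids
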
diff --git a/scripts/reflinks.py b/scripts/reflinks.py
index 8c1cc84..421a692 100644
--- a/scripts/reflinks.py
+++ b/scripts/reflinks.py
@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 """
+Fetch and add titles for bare links in references.
+
 This bot will search for references which are only made of a link without title,
 (i.e. <ref>[https://www.google.fr/]</ref> or <ref>https://www.google.fr/</ref>)
 and will fetch the html title from the link to use it as the title of the wiki
@@ -268,9 +270,10 @@
         # TODO : remove HTML when both opening and closing tags are included
 
     def avoid_uppercase(self):
-        """ If title has more than 6 characters and has 60% of uppercase
-        characters, capitalize() it
+        """
+        Convert to capitalize() if title is 60% uppercase characters.
 
+        Skip titles of 6 characters or fewer.
         """
         if len(self.title) <= 6:
             return
@@ -289,10 +292,12 @@
 
 class DuplicateReferences:
 
-    """ When some references are duplicated in an article,
-    name the first, and remove the content of the others
+    """Helper to de-deuplicate references in text.
 
+    When some references are duplicated in an article,
+    name the first, and remove the content of the others
     """
+
     def __init__(self):
         # Match references
         self.REFS = re.compile(
@@ -400,6 +405,8 @@
 
 class ReferencesRobot(Bot):
 
+    """References bot."""
+
     def __init__(self, generator, **kwargs):
         """- generator : Page generator."""
         self.availableOptions.update({
@@ -463,7 +470,8 @@
 
     def getPDFTitle(self, ref, f):
         """ Use pdfinfo to retrieve title from a PDF.
-        Unix-only, I'm afraid.
+
+        FIXME: Unix-only, I'm afraid.
 
         """
         pywikibot.output(u'PDF file.')
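
The avoid_uppercase rewrite states the rule more directly; the
underlying test is an uppercase ratio with a length guard, something
like:

    def mostly_uppercase(title):
        """True when over 60% of the characters are uppercase (sketch)."""
        if len(title) <= 6:
            return False
        upper = sum(1 for c in title if c.isupper())
        return upper / float(len(title)) > 0.6

    print(mostly_uppercase(u'BREAKING NEWS REPORT'))  # True
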
diff --git a/scripts/replace.py b/scripts/replace.py
index f0ba76e..f901f1a 100755
--- a/scripts/replace.py
+++ b/scripts/replace.py
@@ -1,9 +1,10 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-"""
-This bot will make direct text replacements. It will retrieve information on
-which pages might need changes either from an XML dump or a text file, or only
-change a single page.
+r"""
+This bot will make direct text replacements.
+
+It will retrieve information on which pages might need changes either from
+an XML dump or a text file, or only change a single page.
 
 These command line parameters can be used to specify which pages to work on:
 
@@ -83,7 +84,7 @@
 other:            First argument is the old text, second argument is the new
                   text. If the -regex argument is given, the first argument
                   will be regarded as a regular expression, and the second
-                  argument might contain expressions like \\1 or \g<name>.
+                  argument might contain expressions like \1 or \g<name>.
                   It is possible to introduce more than one pair of old text
                   and replacement.
 
@@ -93,7 +94,7 @@
 new syntax, e.g. {{Stub}}, download an XML dump file (pages-articles) from
 https://download.wikimedia.org, then use this command:
 
-    python replace.py -xml -regex "{{msg:(.*?)}}" "{{\\1}}"
+    python replace.py -xml -regex "{{msg:(.*?)}}" "{{\1}}"
 
 If you have a dump called foobar.xml and want to fix typos in articles, e.g.
 Errror -> Error, use this:
@@ -161,8 +162,8 @@
         * exceptions   - A dictionary which defines when to ignore an
                          occurence. See docu of the ReplaceRobot
                          constructor below.
-
     """
+
     def __init__(self, xmlFilename, xmlStart, replacements, exceptions):
         self.xmlFilename = xmlFilename
         self.replacements = replacements
@@ -232,6 +233,8 @@
                  acceptall=False, allowoverlap=False, recursive=False,
                  addedCat=None, sleep=None, summary='', site=None):
         """
+        Constructor.
+
         Arguments:
             * generator    - A generator that yields Page objects.
             * replacements - A list of 2-tuples of original text (as a
@@ -295,8 +298,9 @@
 
     def isTextExcepted(self, original_text):
         """
-        Iff one of the exceptions applies for the given page contents,
-        returns True.
+        Return True iff one of the exceptions applies for the given text.
+
+        @rtype: bool
         """
         if "text-contains" in self.exceptions:
             for exc in self.exceptions['text-contains']:
@@ -306,8 +310,9 @@
 
     def doReplacements(self, original_text):
         """
-        Returns the text which is generated by applying all replacements to
-        the given text.
+        Apply all replacements to the given text.
+
+        @rtype: unicode
         """
         new_text = original_text
         exceptions = []
@@ -324,7 +329,7 @@
         return new_text
 
     def run(self):
-        """Starts the bot."""
+        """Start the bot."""
         # Run the generator which will yield Pages which might need to be
         # changed.
         for page in self.generator:
@@ -426,6 +431,7 @@
 
 
 def prepareRegexForMySQL(pattern):
+    """Convert regex to MySQL syntax."""
     pattern = pattern.replace('\s', '[:space:]')
     pattern = pattern.replace('\d', '[:digit:]')
     pattern = pattern.replace('\w', '[:alnum:]')
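
The \\1 -> \1 fix above works because the docstring is now a raw
string, so the help text shows exactly what is typed on the command
line. The same substitution expressed directly in Python:

    import re

    text = u'{{msg:Stub}} and {{msg:Infobox}}'
    print(re.sub(r'{{msg:(.*?)}}', r'{{\1}}', text))
    # {{Stub}} and {{Infobox}}
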
diff --git a/scripts/replicate_wiki.py b/scripts/replicate_wiki.py
index e7976da..f417541 100644
--- a/scripts/replicate_wiki.py
+++ b/scripts/replicate_wiki.py
@@ -1,8 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8  -*-
 """
-This bot replicates all pages (from specific namespaces) in a wiki to a second
-wiki within one family.
+This bot replicates pages in a wiki to a second wiki within one family.
 
 Example:
 python replicate_wiki.py [-r] -ns 10 -f wikipedia -o nl li fy
diff --git a/scripts/revertbot.py b/scripts/revertbot.py
index 931646b..0801e04 100644
--- a/scripts/revertbot.py
+++ b/scripts/revertbot.py
@@ -123,6 +123,8 @@
 
 class myRevertBot(BaseRevertBot):
 
+    """Example revert bot."""
+
     def callback(self, item):
         if 'top' in item:
             page = pywikibot.Page(self.site, item['title'])
diff --git a/scripts/script_wui.py b/scripts/script_wui.py
index a4e7c7d..f536bd6 100755
--- a/scripts/script_wui.py
+++ b/scripts/script_wui.py
@@ -1,8 +1,9 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-Bot which runs python framework scripts as (sub-)bot and provides a
-WikiUserInterface (WUI) with Lua support for bot operators.
+Bot which runs python framework scripts as (sub-)bot.
+
+It provides a WikiUserInterface (WUI) with Lua support for bot operators.
 
 This script needs external libraries (see imports and comments there)
 in order to run properly. Most of them can be checked-out at:
@@ -116,6 +117,9 @@
 
 
 class ScriptWUIBot(pywikibot.botirc.IRCBot):
+
+    """WikiUserInterface bot."""
+
     def __init__(self, *arg):
         pywikibot.output(u'\03{lightgreen}* Initialization of bot\03{default}')
 
@@ -205,6 +209,7 @@
 
 # Define a function for the thread
 def main_script(page, rev=None, params=None):
+    """Main thread."""
     # http://opensourcehacker.com/2011/02/23/temporarily-capturing-python-logging-output-to-a-string-buffer/
     # https://docs.python.org/release/2.6/library/logging.html
     from io import StringIO
@@ -261,6 +266,7 @@
 
 
 def wiki_logger(buffer, page, rev=None):
+    """Log to wiki."""
     # (might be a problem here for TS and SGE, output string has another encoding)
     #buffer  = buffer.decode(config.console_encoding)
     buffer = re.sub("\03\{(.*?)\}(.*?)\03\{default\}", "\g<2>", buffer)
diff --git a/scripts/selflink.py b/scripts/selflink.py
index bc59411..a0cd9b5 100644
--- a/scripts/selflink.py
+++ b/scripts/selflink.py
@@ -2,8 +2,7 @@
 # -*- coding: utf-8  -*-
 
 """
-This bot goes over multiple pages of the site, searches for selflinks, and
-allows removing them.
+This bot searches for selflinks and allows removing them.
 
 These command line parameters can be used to specify which pages to work on:
 
@@ -34,6 +33,8 @@
 
 class SelflinkBot(Bot):
 
+    """Self-link removal bot."""
+
     def __init__(self, generator, **kwargs):
         super(SelflinkBot, self).__init__(**kwargs)
         self.generator = generator
diff --git a/scripts/solve_disambiguation.py b/scripts/solve_disambiguation.py
index e3070d7..879ea84 100644
--- a/scripts/solve_disambiguation.py
+++ b/scripts/solve_disambiguation.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-"""
+u"""
 Script to help a human solve disambiguations by presenting a set of options.
 
 Specify the disambiguation page on the command line.
@@ -353,6 +353,9 @@
 
 
 class ReferringPageGeneratorWithIgnore:
+
+    """Referring Page generator, with an ignore manager."""
+
     def __init__(self, disambPage, primary=False, minimum=0):
         self.disambPage = disambPage
         # if run with the -primary argument, enable the ignore manager
@@ -390,11 +393,14 @@
 class PrimaryIgnoreManager(object):
 
     """
+    Primary ignore manager.
+
     If run with the -primary argument, reads from a file which pages should
     not be worked on; these are the ones where the user pressed n last time.
     If run without the -primary argument, doesn't ignore any pages.
 
     """
+
     def __init__(self, disambPage, enabled=False):
         self.disambPage = disambPage
         self.enabled = enabled
@@ -438,6 +444,8 @@
 
 class DisambiguationRobot(Bot):
 
+    """Disambiguation bot."""
+
     ignore_contents = {
         'de': (u'{{[Ii]nuse}}',
                u'{{[Ll]öschen}}',
@@ -478,13 +486,15 @@
         self.setupRegexes()
 
     def checkContents(self, text):
-        '''
+        """
+        Check if the text matches any of the ignore regexes.
+
         For a given text, returns False if none of the regular
         expressions given in the dictionary at the top of this class
         matches a substring of the text.
         Otherwise returns the substring which is matched by one of
         the regular expressions.
-        '''
+        """
         for ig in self.ignore_contents_regexes:
             match = ig.search(text)
             if match:
@@ -531,6 +541,8 @@
 
     def treat(self, refPage, disambPage):
         """
+        Treat a page.
+
         Parameters:
             disambPage - The disambiguation page or redirect we don't want
                 anything to link to
diff --git a/scripts/spamremove.py b/scripts/spamremove.py
index 780f923..06fe278 100755
--- a/scripts/spamremove.py
+++ b/scripts/spamremove.py
@@ -3,6 +3,7 @@
 
 """
 Script to remove links that are being or have been spammed.
+
 Usage:
 
 spamremove.py www.spammedsite.com
diff --git a/scripts/template.py b/scripts/template.py
index 33728d3..5926aa0 100755
--- a/scripts/template.py
+++ b/scripts/template.py
@@ -1,8 +1,9 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 """
-Very simple script to replace a template with another one,
-and to convert the old MediaWiki boilerplate format to the new template format.
+Very simple script to replace a template with another one.
+
+It also converts the old MediaWiki boilerplate format to the new template format.
 
 Syntax: python template.py [-remove] [xml[:filename]] oldTemplate [newTemplate]
 
@@ -24,7 +25,7 @@
              the same effect.
 
 -xml         retrieve information from a local dump
-             (https://download.wikimedia.org). If this argument isn\'t given,
+             (https://download.wikimedia.org). If this argument isn't given,
              info will be loaded from the maintenance page of the live wiki.
              argument can also be given as "-xml:filename.xml".
 
@@ -115,8 +116,9 @@
 
 def UserEditFilterGenerator(generator, username, timestamp=None, skip=False):
     """
-    Generator which will yield Pages depending of user:username is an Author of
-    that page (only looks at the last 100 editors).
+    Generator which will yield Pages modified by username.
+
+    It only looks at the last 100 editors.
     If timestamp is set in MediaWiki format JJJJMMDDhhmmss, older edits are
     ignored
     If skip is set, pages edited by the given user are ignored otherwise only
@@ -145,13 +147,16 @@
 class XmlDumpTemplatePageGenerator:
 
     """
-    Generator which will yield Pages to pages that might contain the chosen
-    template. These pages will be retrieved from a local XML dump file
-    (cur table).
+    Generator which yields Pages that transclude a template.
+
+    These pages will be retrieved from a local XML dump file
+    (cur table), and may no longer transclude the template.
     """
 
     def __init__(self, templates, xmlfilename):
         """
+        Constructor.
+
         Arguments:
             * templateNames - A list of Page object representing the searched
                               templates
@@ -189,12 +194,8 @@
 
 class TemplateRobot(Bot):
 
-    """
-    This bot will load all pages yielded by a page generator and replace or
-    remove all occurences of the old template, or substitute them with the
-    template's text.
+    """This bot will replace, remove or subst all occurences of a template."""
 
-    """
     def __init__(self, generator, templates, **kwargs):
         """
         Constructor.
diff --git a/scripts/templatecount.py b/scripts/templatecount.py
index 3664fcc..ced90d7 100644
--- a/scripts/templatecount.py
+++ b/scripts/templatecount.py
@@ -1,8 +1,9 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-This script will display the list of pages transcluding a given list of
-templates. It can also be used to simply count the number of pages (rather than
+This script will display the list of pages transcluding a given list of templates.
+
+It can also be used to simply count the number of pages (rather than
 listing each individually).
 
 Syntax: python templatecount.py command [arguments]
@@ -46,6 +47,8 @@
 
 class TemplateCountRobot:
 
+    """Template count bot."""
+
     @staticmethod
     def countTemplates(templates, namespaces):
         templateDict = TemplateCountRobot.template_dict(templates, namespaces)
diff --git a/scripts/touch.py b/scripts/touch.py
index e309565..ab783d9 100755
--- a/scripts/touch.py
+++ b/scripts/touch.py
@@ -2,8 +2,9 @@
 # -*- coding: utf-8  -*-
 
 """
-This bot goes over multiple pages of a wiki, and edits them without
-changing. This is for example used to get category links in templates
+This bot goes over multiple pages of a wiki, and edits them without changes.
+
+This is for example used to get category links in templates
 working.
 
 This script understands various command-line arguments:
@@ -31,6 +32,8 @@
 
 class TouchBot(pywikibot.Bot):
 
+    """Page touch bot."""
+
     def __init__(self, generator, **kwargs):
         self.availableOptions.update({
             'redir': False,  # include redirect pages
diff --git a/scripts/transferbot.py b/scripts/transferbot.py
index ab735c5..fb7bfa0 100644
--- a/scripts/transferbot.py
+++ b/scripts/transferbot.py
@@ -2,8 +2,9 @@
 # -*- coding: utf-8  -*-
 
 """
-This script transfers pages from a source wiki to a target wiki. It also
-copies edit history to a subpage.
+This script transfers pages from a source wiki to a target wiki.
+
+It also copies edit history to a subpage.
 
 -tolang:          The target site code.
 
diff --git a/scripts/unlink.py b/scripts/unlink.py
index 1a8161b..0905279 100755
--- a/scripts/unlink.py
+++ b/scripts/unlink.py
@@ -36,6 +36,8 @@
 
 class UnlinkBot(Bot):
 
+    """Page unlinking bot."""
+
     def __init__(self, pageToUnlink, **kwargs):
         self.availableOptions.update({
             'namespaces': [],
@@ -73,7 +75,7 @@
 
     def handleNextLink(self, text, match, context=100):
         """
-        Returns a tuple (text, jumpToBeginning).
+        Return a tuple (text, jumpToBeginning).
 
         text is the unicode string after the current link has been processed.
         jumpToBeginning is a boolean which specifies if the cursor position
diff --git a/scripts/unusedfiles.py b/scripts/unusedfiles.py
index 7dcb5a2..d442268 100644
--- a/scripts/unusedfiles.py
+++ b/scripts/unusedfiles.py
@@ -1,8 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
 """
-This bot appends some text to all unused images and other text to the
-respective uploaders.
+This bot appends some text to all unused images and notifies uploaders.
 
 Parameters:
 
@@ -51,6 +50,8 @@
 
 class UnusedFilesBot(Bot):
 
+    """Unused files bot."""
+
     def __init__(self, site, **kwargs):
         super(UnusedFilesBot, self).__init__(**kwargs)
         self.site = site
diff --git a/scripts/upload.py b/scripts/upload.py
index 4c45a80..893cfcb 100755
--- a/scripts/upload.py
+++ b/scripts/upload.py
@@ -59,11 +59,16 @@
 
 
 class UploadRobot:
+
+    """Upload bot."""
+
     def __init__(self, url, urlEncoding=None, description=u'',
                  useFilename=None, keepFilename=False,
                  verifyDescription=True, ignoreWarning=False,
                  targetSite=None, uploadByUrl=False, aborts=[], chunk_size=0):
         """
+        Constructor.
+
         @param ignoreWarning: Set this to True if you want to upload even if
             another file would be overwritten or another mistake would be
             risked.
@@ -294,7 +299,7 @@
             return filename  # data['filename']
 
     def run(self):
-
+        """Run bot."""
         # early check that upload is enabled
         if self.targetSite.is_uploaddisabled():
             pywikibot.error(
diff --git a/scripts/version.py b/scripts/version.py
index 3dc2b2d..d211884 100755
--- a/scripts/version.py
+++ b/scripts/version.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8  -*-
-""" Script to determine the Pywikibot version (tag, revision and date) """
+""" Script to determine the Pywikibot version (tag, revision and date). """
 #
 # (C) Merlijn 'valhallasw' van Deen, 2007-2008
 # (C) xqt, 2010-2014
diff --git a/scripts/weblinkchecker.py b/scripts/weblinkchecker.py
index 0bc4fde..3efe142 100644
--- a/scripts/weblinkchecker.py
+++ b/scripts/weblinkchecker.py
@@ -1,7 +1,8 @@
 # -*- coding: utf-8  -*-
 """
-This bot is used for checking external links found at the wiki. It checks
-several pages at once, with a limit set by the config variable
+This bot is used for checking external links found at the wiki.
+
+It checks several pages at once, with a limit set by the config variable
 max_external_links, which defaults to 50.
 
 The bot won't change any wiki pages, it will only report dead links such that
@@ -149,6 +150,11 @@
 
 
 def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):
+    """
+    Yield web links from text.
+
+    TODO: move to textlib
+    """
     text = textlib.removeDisabledParts(text)
 
     # MediaWiki parses templates before parsing external links. Thus, there
@@ -229,6 +235,8 @@
 class LinkChecker(object):
 
     """
+    Check links.
+
     Given a HTTP URL, tries to load the page from the Internet and checks if it
     is still online.
 
@@ -239,9 +247,12 @@
     correctly! (This will give a Socket Error)
 
     """
+
     def __init__(self, url, redirectChain=[], serverEncoding=None,
                  HTTPignore=[]):
         """
+        Constructor.
+
         redirectChain is a list of redirects which were resolved by
         resolveRedirect(). This is needed to detect redirect loops.
         """
@@ -322,12 +333,12 @@
 
     def resolveRedirect(self, useHEAD=False):
         """
-        Requests the header from the server. If the page is an HTTP redirect,
-        returns the redirect target URL as a string. Otherwise returns None.
+        Return the redirect target URL as a string if it is an HTTP redirect.
 
         If useHEAD is true, uses the HTTP HEAD method, which saves bandwidth
         by not downloading the body. Otherwise, the HTTP GET method is used.
 
+        @rtype: unicode or None
         """
         conn = self.getConnection()
         try:
@@ -387,8 +398,9 @@
 
     def check(self, useHEAD=False):
         """
-        Returns True and the server status message if the page is alive.
-        Otherwise returns false
+        Return True and the server status message if the page is alive.
+
+        @rtype: tuple of (bool, unicode)
         """
         try:
             wasRedirected = self.resolveRedirect(useHEAD=useHEAD)
@@ -480,10 +492,11 @@
 
 class LinkCheckThread(threading.Thread):
 
-    """ A thread responsible for checking one URL. After checking the page, it
-    will die.
+    """ A thread responsible for checking one URL.
 
+    After checking the page, it will die.
     """
+
     def __init__(self, page, url, history, HTTPignore, day):
         threading.Thread.__init__(self)
         self.page = page
@@ -515,7 +528,10 @@
 
 class History:
 
-    """ Store previously found dead links. The URLs are dictionary keys, and
+    """
+    Store previously found dead links.
+
+    The URLs are dictionary keys, and
     values are lists of tuples where each tuple represents one time the URL was
     found dead. Tuples have the form (title, date, error) where title is the
 wiki page where the URL was found, date is an instance of time, and error is
@@ -551,7 +567,7 @@
             self.historyDict = {}
 
     def log(self, url, error, containingPage, archiveURL):
-        """Logs an error report to a text file in the deadlinks 
subdirectory."""
+        """Log an error report to a text file in the deadlinks subdirectory."""
         if archiveURL:
             errorReport = u'* %s ([%s archive])\n' % (url, archiveURL)
         else:
@@ -579,7 +595,7 @@
                                      archiveURL)
 
     def setLinkDead(self, url, error, page, day):
-        """Adds the fact that the link was found dead to the .dat file."""
+        """Add the fact that the link was found dead to the .dat file."""
         self.semaphore.acquire()
         now = time.time()
         if url in self.historyDict:
@@ -604,8 +620,11 @@
 
     def setLinkAlive(self, url):
         """
-        If the link was previously found dead, removes it from the .dat file
-        and returns True, else returns False.
+        Record that the link is now alive.
+
+        If the link was previously found dead, remove it from the .dat file.
+
+        @return: True if the link was previously found dead, else False.
         """
         if url in self.historyDict:
             self.semaphore.acquire()
@@ -628,8 +647,9 @@
 class DeadLinkReportThread(threading.Thread):
 
     """
-    A Thread that is responsible for posting error reports on talk pages. There
-    will only be one DeadLinkReportThread, and it is using a semaphore to make
+    A Thread that is responsible for posting error reports on talk pages.
+
+    There is only one DeadLinkReportThread, and it uses a semaphore to make
     sure that two LinkCheckerThreads can not access the queue at the same time.
     """
 
@@ -641,10 +661,7 @@
         self.killed = False
 
     def report(self, url, errorReport, containingPage, archiveURL):
-        """ Tries to add an error report to the talk page belonging to the page
-        containing the dead link.
-
-        """
+        """Report error on talk page of the page containing the dead link."""
         self.semaphore.acquire()
         self.queue.append((url, errorReport, containingPage, archiveURL))
         self.semaphore.release()
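
The acquire/append/release sequence in report() is plain mutual exclusion. A self-contained sketch of the same pattern, with generic names rather than anything from weblinkchecker.py:

# Generic sketch of a semaphore-guarded shared list, mirroring the
# pattern in DeadLinkReportThread.report() above.
import threading

semaphore = threading.Semaphore()
queue = []

def report(item):
    """Append to the shared queue while holding the semaphore."""
    semaphore.acquire()
    try:
        queue.append(item)
    finally:
        semaphore.release()

threads = [threading.Thread(target=report, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(sorted(queue))  # [0, 1, 2, 3, 4]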
@@ -727,10 +744,11 @@
 class WeblinkCheckerRobot:
 
     """
-    Bot which will use several LinkCheckThreads at once to search for dead
-    weblinks on pages provided by the given generator.
+    Bot which will search for dead weblinks.
 
+    It runs several LinkCheckThreads at once on pages from the generator.
     """
+
     def __init__(self, generator, HTTPignore=None, day=7):
         self.generator = generator
         if config.report_dead_links_on_talk:
@@ -777,6 +795,7 @@
 
 
 def RepeatPageGenerator():
+    """Generator for pages in History."""
     history = History(None)
     pageTitles = set()
     for value in history.historyDict.values():
@@ -788,6 +807,12 @@
 
 
 def countLinkCheckThreads():
+    """
+    Count LinkCheckThread threads.
+
+    @return: number of LinkCheckThread threads
+    @rtype: int
+    """
     i = 0
     for thread in threading.enumerate():
         if isinstance(thread, LinkCheckThread):
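
The counting idiom used by countLinkCheckThreads() can be tried standalone; in this sketch WorkerThread merely stands in for LinkCheckThread.

# Standalone demo of counting live threads by type via
# threading.enumerate(); WorkerThread stands in for LinkCheckThread.
import threading
import time

class WorkerThread(threading.Thread):
    def run(self):
        time.sleep(0.5)

workers = [WorkerThread() for _ in range(3)]
for w in workers:
    w.start()

count = sum(1 for t in threading.enumerate()
            if isinstance(t, WorkerThread))
print(count)  # 3, while the workers are still sleeping
for w in workers:
    w.join()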
diff --git a/scripts/welcome.py b/scripts/welcome.py
index e8d2c2a..f10660d 100644
--- a/scripts/welcome.py
+++ b/scripts/welcome.py
@@ -1,11 +1,10 @@
 # -*- coding: utf-8  -*-
-"""
-Script to welcome new users. This script works out of the box for Wikis that
+u"""
+Script to welcome new users.
+
+This script works out of the box for Wikis that
 have been defined in the script. It is currently used on the Dutch, Norwegian,
 Albanian, Italian Wikipedia, Wikimedia Commons and English Wikiquote.
-
-Note: You can download the latest version available
-from here: https://www.mediawiki.org/wiki/Manual:Pywikibot/welcome.py
 
 Ensure you have community support before running this bot!
 
@@ -402,14 +401,13 @@
 
 
 class FilenameNotSet(pywikibot.Error):
+
     """An exception indicating that a signature filename was not specifed."""
 
 
 class Global(object):
 
-    """Container class for global settings.
-       Use of globals outside of this is to be avoided.
-       """
+    """Container class for global settings."""
 
     attachEditCount = 1     # number of edits required for a user to be welcomed
     dumpToLog = 15          # number of users that are required to add the log :)
@@ -437,7 +435,6 @@
 
     def __init__(self):
         """Constructor."""
-
         self.site = pywikibot.Site()
         self.check_managed_sites()
         self.bname = dict()
@@ -856,6 +853,7 @@
 
 
 def showStatus(n=0):
+    """Output colorized status."""
     staColor = {
         0: 'lightpurple',
         1: 'lightaqua',
@@ -877,7 +875,7 @@
 
 
 def load_word_function(raw):
-    """ This is a function used to load the badword and the whitelist."""
+    """Load the badword list and the whitelist."""
     page = re.compile(r"(?:\"|\')(.*?)(?:\"|\')(?:, |\))", re.UNICODE)
     list_loaded = page.findall(raw)
     if len(list_loaded) == 0:
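
The regular expression compiled here can be exercised directly. In the snippet below, the raw string imitating the badword page content is invented; the pattern itself is copied verbatim from the function.

# Runnable check of the pattern used by load_word_function(); the
# raw input string is an invented stand-in for the wiki page text.
import re

page = re.compile(r"(?:\"|\')(.*?)(?:\"|\')(?:, |\))", re.UNICODE)
raw = "badwords = ('spam', 'scam', 'pharma')"
print(page.findall(raw))  # ['spam', 'scam', 'pharma']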
diff --git a/tox.ini b/tox.ini
index 804c874..8f5ba59 100644
--- a/tox.ini
+++ b/tox.ini
@@ -48,12 +48,58 @@
     ./pywikibot/data/__init__.py \
     ./pywikibot/userinterfaces/transliteration.py \
     ./pywikibot/userinterfaces/terminal_interface.py \
+    ./scripts/__init__.py \
+    ./scripts/basic.py \
     ./scripts/category.py \
+    ./scripts/category_redirect.py \
     ./scripts/claimit.py \
+    ./scripts/clean_sandbox.py \
+    ./scripts/commons_link.py \
+    ./scripts/commonscat.py \
     ./scripts/coordinate_import.py \
+    ./scripts/cosmetic_changes.py \
+    ./scripts/create_categories.py \
+    ./scripts/data_ingestion.py \
+    ./scripts/delete.py \
+    ./scripts/editarticle.py \
+    ./scripts/flickrripper.py \
+    ./scripts/freebasemappingupload.py \
     ./scripts/harvest_template.py \
     ./scripts/illustrate_wikidata.py \
+    ./scripts/image.py \
+    ./scripts/imagerecat.py \
+    ./scripts/imagetransfer.py \
+    ./scripts/imageuncat.py \
+    ./scripts/isbn.py \
+    ./scripts/listpages.py \
+    ./scripts/login.py \
+    ./scripts/lonelypages.py \
     ./scripts/newitem.py \
+    ./scripts/misspelling.py \
+    ./scripts/movepages.py \
+    ./scripts/noreferences.py \
+    ./scripts/nowcommons.py \
+    ./scripts/pagefromfile.py \
+    ./scripts/protect.py \
+    ./scripts/redirect.py \
+    ./scripts/reflinks.py \
+    ./scripts/replace.py \
+    ./scripts/replicate_wiki.py \
+    ./scripts/revertbot.py \
+    ./scripts/script_wui.py \
+    ./scripts/selflink.py \
+    ./scripts/shell.py \
+    ./scripts/spamremove.py \
+    ./scripts/template.py \
+    ./scripts/templatecount.py \
+    ./scripts/touch.py \
+    ./scripts/transferbot.py \
+    ./scripts/unlink.py \
+    ./scripts/unusedfiles.py \
+    ./scripts/version.py \
+    ./scripts/watchlist.py \
+    ./scripts/weblinkchecker.py \
+    ./scripts/welcome.py \
     ./tests/aspects.py \
     ./tests/api_tests.py \
     ./tests/dry_api_tests.py \

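Taken together, the edits above converge on one docstring shape, essentially PEP 257: a one-line imperative summary ending in a period, a blank line before any further detail, and a blank line on each side of a class docstring. A minimal synthetic example (ExampleBot does not exist in the repository):

# Synthetic example of the docstring shape this change applies;
# ExampleBot is not a real script.
class ExampleBot(object):

    """Do one small thing to example pages."""

    def __init__(self):
        """Constructor."""
        self.done = False

    def run(self):
        """Run bot.

        Process the pages and record completion.
        """
        self.done = True
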
-- 
To view, visit https://gerrit.wikimedia.org/r/165453
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ic5a25fac9592fead9a6d8b0748bf13947ef7f2c7
Gerrit-PatchSet: 1
Gerrit-Project: pywikibot/core
Gerrit-Branch: master
Gerrit-Owner: John Vandenberg <jay...@gmail.com>
