Revision: 7650
Author:   alexsh
Date:     2009-11-15 13:17:01 +0000 (Sun, 15 Nov 2009)

Log Message:
-----------
fix all MyURLopener AttributeError

Modified Paths:
--------------
    trunk/pywikipedia/imageharvest.py
    trunk/pywikipedia/maintenance/wikimedia_sites.py
    trunk/pywikipedia/match_images.py
    trunk/pywikipedia/saveHTML.py

Modified: trunk/pywikipedia/imageharvest.py
===================================================================
--- trunk/pywikipedia/imageharvest.py   2009-11-15 13:13:48 UTC (rev 7649)
+++ trunk/pywikipedia/imageharvest.py   2009-11-15 13:17:01 UTC (rev 7650)
@@ -31,7 +31,7 @@
         relativepath=relativepath[:len(relativepath)-1]
         relativepath="/".join(relativepath)
     links = []
-    uo = wikipedia.MyURLopener()
+    uo = wikipedia.MyURLopener
     file = uo.open(url)
     text = file.read()
     file.close()

Modified: trunk/pywikipedia/maintenance/wikimedia_sites.py
===================================================================
--- trunk/pywikipedia/maintenance/wikimedia_sites.py    2009-11-15 13:13:48 UTC (rev 7649)
+++ trunk/pywikipedia/maintenance/wikimedia_sites.py    2009-11-15 13:17:01 UTC (rev 7650)
@@ -25,7 +25,7 @@
     obsolete = wikipedia.Family(family).obsolete
 
     url = 'http://s23.org/wikistats/%s' % familiesDict[family]
-    uo = wikipedia.MyURLopener()
+    uo = wikipedia.MyURLopener
     f = uo.open(url)
     text = f.read()
 

Modified: trunk/pywikipedia/match_images.py
===================================================================
--- trunk/pywikipedia/match_images.py   2009-11-15 13:13:48 UTC (rev 7649)
+++ trunk/pywikipedia/match_images.py   2009-11-15 13:17:01 UTC (rev 7650)
@@ -80,7 +80,7 @@
     Get the image object to work based on an imagePage object
     '''
     imageURL=imagePage.fileUrl()
-    imageURLopener= wikipedia.MyURLopener()
+    imageURLopener= wikipedia.MyURLopener
     imageWebFile = imageURLopener.open(imageURL)
     imageBuffer = StringIO.StringIO(imageWebFile.read())
     image = Image.open(imageBuffer)

Modified: trunk/pywikipedia/saveHTML.py
===================================================================
--- trunk/pywikipedia/saveHTML.py       2009-11-15 13:13:48 UTC (rev 7649)
+++ trunk/pywikipedia/saveHTML.py       2009-11-15 13:17:01 UTC (rev 7650)
@@ -200,14 +200,14 @@
                     print "skipping existing " + i['image']
                     continue
                 print 'downloading ' + i['image'],
-                uo = wikipedia.MyURLopener()
+                uo = wikipedia.MyURLopener
                 file = uo.open( "http://upload.wikimedia.org/wikipedia/"
                                 +mysite.lang + '/' + i['path'] + i['image'])
                 content = file.read()
                 if (len(content) < 500):
                     uo.close()
                     print "downloading from commons",
-                    uo = wikipedia.MyURLopener()
+                    uo = wikipedia.MyURLopener
                     file = uo.open( "http://commons.wikimedia.org/upload/"
                                     + i['path'] + i['image'])
                     #print "http://commons.wikimedia.org/upload/", i['path'] , i['image'], file



_______________________________________________
Pywikipedia-svn mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/pywikipedia-svn

Reply via email to