Hello community,

here is the log from the commit of package python-fanficfare for 
openSUSE:Factory checked in at 2019-05-03 22:47:26
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
 and      /work/SRC/openSUSE:Factory/.python-fanficfare.new.5148 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-fanficfare"

Fri May  3 22:47:26 2019 rev:8 rq:700318 version:3.7.6

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes      2019-04-28 20:15:41.750334864 +0200
+++ /work/SRC/openSUSE:Factory/.python-fanficfare.new.5148/python-fanficfare.changes    2019-05-03 22:47:26.915909991 +0200
@@ -1,0 +2,15 @@
+Fri May  3 08:45:55 CEST 2019 - Matej Cepl <[email protected]>
+
+- Update to 3.7.6:
+  - Fix CLI Upload to have correct version.
+  - Fix for BS halping with string conversions on PI update from
+    Saved Meta Column.
+  - More improvements for adapter_asianfanficscom, including
+    auto_sub feature, thanks oh45454545
+  - Improvements for adapter_asianfanficscom, thanks oh45454545
+  - Update adapter_asianfanficscom to fetch chapter texts from
+    JSON url.
+  - Ad wall indicator has changed for adapter_webnovelcom.
+  - Fix use_archived_author in AO3.
+
+-------------------------------------------------------------------

Old:
----
  FanFicFare-3.7.0.tar.gz

New:
----
  FanFicFare-3.7.6.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.slTqOD/_old  2019-05-03 22:47:27.411911130 +0200
+++ /var/tmp/diff_new_pack.slTqOD/_new  2019-05-03 22:47:27.415911139 +0200
@@ -20,7 +20,7 @@
 %define modnamedown fanficfare
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-fanficfare
-Version:        3.7.0
+Version:        3.7.6
 Release:        0
 Summary:        Tool for making eBooks from stories on fanfiction and other web sites
 License:        GPL-3.0-only

++++++ FanFicFare-3.7.0.tar.gz -> FanFicFare-3.7.6.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/calibre-plugin/__init__.py new/FanFicFare-3.7.6/calibre-plugin/__init__.py
--- old/FanFicFare-3.7.0/calibre-plugin/__init__.py     2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/calibre-plugin/__init__.py     2019-04-24 05:29:17.000000000 +0200
@@ -33,7 +33,7 @@
 from calibre.customize import InterfaceActionBase
 
 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (3, 7, 0)
+__version__ = (3, 7, 6)
 
 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/calibre-plugin/plugin-defaults.ini new/FanFicFare-3.7.6/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-3.7.0/calibre-plugin/plugin-defaults.ini     2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/calibre-plugin/plugin-defaults.ini     2019-04-24 05:29:17.000000000 +0200
@@ -2133,10 +2133,11 @@
 
 [www.asianfanfics.com]
 ## Unlike most sites, asianfanfics.com, instead of denying access to
-## 'adult' content, will censor the text of stories to remove the
-## 'adult' words.  FanFicFare cannot detect when this happens, but if
-## you set your username and password, FFF will log you in to prevent
-## that.
+## 'adult' or subscriber-only content, will censor the text of stories
+## to remove 'adult' words or entire portions of the text. This is why
+## an account is required to download stories from this site. It is
+## also strongly recommended to consider enabling auto_sub in order to
+## further avoid this from happening.
 #username:YourName
 #password:yourpassword
 
@@ -2163,6 +2164,14 @@
 ## chapter_start to remove the standard chapter title, as shown below.
 inject_chapter_title:false
 
+## This website removes certain HTML tags and portions of the story
+## from subscriber-only stories. It is strongly recommended to turn
+## this option on. This will automatically subscribe you to such
+## stories in order to acquire the unaltered text. You can
+## unsubscribe manually on the website after the story has been
+## downloaded.
+auto_sub:false
+
 [www.bdsmlibrary.com]
 ## Some sites also require the user to confirm they are adult for
 ## adult content.  Uncomment by removing '#' in front of is_adult.
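
For anyone carrying their own settings: based on the defaults above, switching the new behaviour on in personal.ini would look roughly like the sketch below (YourName/yourpassword are the placeholder values from the defaults above, not real credentials):

    [www.asianfanfics.com]
    username:YourName
    password:yourpassword
    auto_sub:true
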
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/fanficfare/adapters/adapter_archiveofourownorg.py new/FanFicFare-3.7.6/fanficfare/adapters/adapter_archiveofourownorg.py
--- old/FanFicFare-3.7.0/fanficfare/adapters/adapter_archiveofourownorg.py      2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/fanficfare/adapters/adapter_archiveofourownorg.py      2019-04-24 05:29:17.000000000 +0200
@@ -225,8 +225,7 @@
         # Hope Roy [archived by <a href="/users/ssa_archivist/pseuds/ssa_archivist" rel="author">ssa_archivist</a>]
         # </h3>
         # stripped:"Hope Roy [archived by ssa_archivist]"
-
-        m = re.match(r'(?P<author>.*) \[archived by (?P<archivist>.*)\]',stripHTML(byline))
+        m = re.match(r'(?P<author>.*) \[archived by ?(?P<archivist>.*)\]',stripHTML(byline))
         if( m and
             len(alist) == 1 and
             self.getConfig('use_archived_author') ):
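
The functional change above is the '?' that makes the space after 'by' optional in the byline pattern. A standalone sketch of the difference, runnable on its own (the second byline is a contrived edge case, not taken from AO3):

    import re

    # updated pattern from the hunk above: the space before the archivist
    # name is now optional
    pattern = r'(?P<author>.*) \[archived by ?(?P<archivist>.*)\]'

    for byline in ('Hope Roy [archived by ssa_archivist]',
                   'Hope Roy [archived by]'):
        m = re.match(pattern, byline)
        print(m.groupdict() if m else None)
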
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/fanficfare/adapters/adapter_asianfanficscom.py new/FanFicFare-3.7.6/fanficfare/adapters/adapter_asianfanficscom.py
--- old/FanFicFare-3.7.0/fanficfare/adapters/adapter_asianfanficscom.py 2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/fanficfare/adapters/adapter_asianfanficscom.py 2019-04-24 05:29:17.000000000 +0200
@@ -4,6 +4,7 @@
 import logging
 logger = logging.getLogger(__name__)
 import re
+import json
 from ..htmlcleanup import stripHTML
 from .. import exceptions as exceptions
 
@@ -70,45 +71,40 @@
         else:
             params['username'] = self.getConfig("username")
             params['password'] = self.getConfig("password")
+
+        if not params['username']:
+            raise exceptions.FailedToLogin(url,params['username'])
+
         params['from_url'] = url
         params['csrf_aff_token'] = soup.find('input',{'name':'csrf_aff_token'})['value']
+        if not params['csrf_aff_token']:
+            raise exceptions.FailedToDownload('Error when logging in. This usually means a change in the website code.')
+
         loginUrl = 'https://' + self.getSiteDomain() + '/login/index'
         logger.info("Will now login to URL (%s) as (%s)" % (loginUrl, params['username']))
 
-        d = self._postUrl(loginUrl, params, usecache=False)
-
-        if params['username'] not in d: # check if username is mentioned in output (logged in as, var visitorName, etc.)
-            logger.info("Failed to login to URL %s as %s" % (loginUrl, params['username']))
+        data = self._postUrl(loginUrl, params)
+        soup = self.make_soup(data)
+        if self.loginNeededCheck(soup):
+            logger.info('Failed to login to URL %s as %s' % (loginUrl, params['username']))
             raise exceptions.FailedToLogin(url,params['username'])
-            return False
-        else:
-            return True
 
-    def doAdultCheck(self, url, soup):
-        check = soup.find('form',{'action':'/account/toggle_age'})
-        if check:
-            logger.debug("Found adult check")
-            if self.is_adult or self.getConfig("is_adult"):
-                contentFilter = check.find('a',{'href':'/account/mark_over_18'}) #two different types of adult checks
-                if contentFilter:
-                    loginUrl = 'https://' + self.getSiteDomain() + '/account/mark_over_18'
-                    self._fetchUrl(loginUrl)
-                else:
-                    params = {}
-                    params['csrf_aff_token'] = check.find('input',{'name':'csrf_aff_token'})['value']
-                    params['is_of_age'] = '1'
-                    params['current_url'] = '/story/view/' + self.story.getMetadata('storyId')
-                    loginUrl = 'https://' + self.getSiteDomain() + '/account/toggle_age'
-                    self._postUrl(loginUrl,params)
-
-                data = self._fetchUrl(url,usecache=False)
-                soup = self.make_soup(data)
-                if "Are you over 18 years old" in data:
-                    raise exceptions.FailedToDownload("Error downloading Chapter: %s!  Missing required element!" % url)
-                else:
-                    return soup
+    def loginNeededCheck(self,soup):
+        return soup.find('div',{'id':'login'}) != None
+
+    def doStorySubscribe(self, url, soup):
+        subHref = soup.find('a',{'id':'subscribe'})
+        if subHref:
+            #does not work when using https - 403
+            subUrl = 'http://' + self.getSiteDomain() + subHref['href']
+            self._fetchUrl(subUrl)
+            data = self._fetchUrl(url,usecache=False)
+            soup = self.make_soup(data)
+            check = soup.find('div',{'class':'click-to-read-full'})
+            if check:
+                return False
             else:
-                raise exceptions.AdultCheckRequired(self.url)
+                return soup
         else:
             return False
 
@@ -123,7 +119,6 @@
     def doExtractChapterUrlsAndMetadata(self,get_cover=True):
         url = self.url
         logger.info("url: "+url)
-
         try:
             data = self._fetchUrl(url)
 
@@ -136,18 +131,23 @@
         # use BeautifulSoup HTML parser to make everything easier to find.
         soup = self.make_soup(data)
 
-        # it is best to log in whenever possible, unless already logged in from cache..
-        if self.password or self.getConfig("password") and "Logout" not in data:
+        if self.loginNeededCheck(soup):
+            # always login if not already to avoid lots of headaches
             self.performLogin(url,soup)
+            # refresh website after logging in
             data = self._fetchUrl(url,usecache=False)
             soup = self.make_soup(data)
-        elif "Logout" not in data:
-            logger.info('Note: Logging in is highly recommended, as this website censors text if not logged in.')
 
-        # adult check
-        self.checkSoup = self.doAdultCheck(url,soup)
-        if self.checkSoup:
-            soup = self.checkSoup
+        # subscription check
+        subCheck = soup.find('div',{'class':'click-to-read-full'})
+        if subCheck and self.getConfig("auto_sub"):
+            subSoup = self.doStorySubscribe(url,soup)
+            if subSoup:
+                soup = subSoup
+            else:
+                raise exceptions.FailedToDownload("Error when subscribing to story. This usually means a change in the website code.")
+        elif subCheck and not self.getConfig("auto_sub"):
+            raise exceptions.FailedToDownload("This story is only available to subscribers. You can subscribe manually on the web site, or set auto_sub:true in personal.ini.")
 
         ## Title
         a = soup.find('h1', {'id': 'story-title'})
@@ -171,6 +171,7 @@
         for index, chapter in enumerate(chapters):
             if chapter.text != 'Foreword': # skip the foreword
                 self.add_chapter(chapter.text,'https://' + self.getSiteDomain() + chapter['value']) # note: AFF cuts off chapter names in list. this gets kind of fixed later on
+
         # find timestamp
         a = soup.find('span', text='Updated')
         if a == None:
@@ -189,7 +190,10 @@
             self.story.setMetadata('status', 'In-Progress')
 
         # story description
-        a = soup.find('div', {'id':'story-description'})
+        jsonlink = soup.find('link',href=re.compile(r'/api/forewords/[0-9]+/foreword_[0-9a-z]+.json'))
+        fore_json = json.loads(self._fetchUrl(jsonlink['href']))
+        content = self.make_soup(fore_json['post']).find('body') # BS4 adds <html><body> if not present.
+        a = content.find('div', {'id':'story-description'})
         if a:
             self.setDescription(url,a)
 
@@ -216,6 +220,12 @@
             a = a.parent.find('time')
             self.story.setMetadata('dateUpdated', makeDate(a['datetime'], self.dateformat))
 
+        # word count
+        a = soup.find('span', text='Total Word Count')
+        if a:
+            a = a.find_next('span')
+            self.story.setMetadata('numWords', int(a.text.split()[0]))
+
         # upvote, subs, and views
         a = soup.find('div',{'class':'title-meta'})
         spans = a.findAll('span', recursive=False)
@@ -241,22 +251,21 @@
         data = self._fetchUrl(url)
         soup = self.make_soup(data)
 
-        # have to do adult check here as well because individual chapters can be marked as mature
-        if not self.checkSoup:
-            self.checkSoup = self.doAdultCheck(url,soup)
-            if self.checkSoup:
-                soup = self.checkSoup
-
-        # grab contents
-        content = soup.find('div', {'id': 'user-submitted-body'})
-        if content:
+        try:
+            # https://www.asianfanfics.com/api/chapters/4791923/chapter_46d32e413d1a702a26f7637eabbfb6f3.json
+            jsonlink = soup.find('link',href=re.compile(r'/api/chapters/[0-9]+/chapter_[0-9a-z]+.json'))
+            chap_json = json.loads(self._fetchUrl(jsonlink['href']))
+            content = self.make_soup(chap_json['post']).find('body') # BS4 adds <html><body> if not present.
+            content.name='div' # change body to a div.
             if self.getConfig('inject_chapter_title'):
+                # the dumbest workaround ever for the abbreviated chapter titles from before
                 logger.debug("Injecting full-length chapter title")
                 newTitle = soup.find('h1', {'id' : 'chapter-title'}).text
-                newTitle = self.make_soup('<h3>%s</h3>' % (newTitle)) # the dumbest workaround ever for the abbreviated chapter titles from before
+                newTitle = self.make_soup('<h3>%s</h3>' % (newTitle)).find('body') # BS4 adds <html><body> if not present.
+                newTitle.name='div' # change body to a div.
                 newTitle.append(content)
                 return self.utf8FromSoup(url,newTitle)
             else:
                 return self.utf8FromSoup(url,content)
-        else:
-            raise exceptions.FailedToDownload("Error downloading Chapter: %s!  Missing required element!" % url)
+        except Exception as e:
+            raise exceptions.FailedToDownload("Error downloading Chapter: %s %s!" % (url,e))
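
The net effect of the asianfanfics.com changes is that forewords and chapter bodies are now pulled from the site's JSON endpoints rather than scraped from the rendered page. Outside of FFF's own helpers (_fetchUrl(), make_soup()), the same flow looks roughly like the sketch below, which substitutes requests and BeautifulSoup/html5lib and assumes the <link> tag carries a fetchable URL as in the example comment above:

    import json
    import re

    import requests
    from bs4 import BeautifulSoup

    def chapter_body(chapter_page_html):
        # locate the <link> pointing at the chapter JSON, e.g.
        # /api/chapters/<id>/chapter_<hash>.json
        page = BeautifulSoup(chapter_page_html, 'html5lib')
        jsonlink = page.find('link', href=re.compile(r'/api/chapters/[0-9]+/chapter_[0-9a-z]+.json'))
        chap_json = json.loads(requests.get(jsonlink['href']).text)
        # html5lib wraps the fragment in <html><body>, so grab the body
        # and rename it to a plain <div> for the ebook output
        content = BeautifulSoup(chap_json['post'], 'html5lib').find('body')
        content.name = 'div'
        return content
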
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/fanficfare/adapters/adapter_webnovelcom.py new/FanFicFare-3.7.6/fanficfare/adapters/adapter_webnovelcom.py
--- old/FanFicFare-3.7.0/fanficfare/adapters/adapter_webnovelcom.py     2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/fanficfare/adapters/adapter_webnovelcom.py     2019-04-24 05:29:17.000000000 +0200
@@ -179,11 +179,9 @@
         for volume in jsondata["data"]["volumeItems"]:
             for chap in volume["chapterItems"]:
                 # Only allow free and VIP type 1 chapters
-                if chap['isVip'] not in [0]: # removed VIP type 1
-                                             # chapter (ad-wall'ed)
-                                             # because the ad-wall
-                                             # bypass code stopped
-                                             # working. --JM
+                if chap['isAuth'] not in [1]: # Ad wall indicator
+                                              # seems to have changed
+                                              # --JM
                     continue
 
                 chap_title = 'Chapter ' + unicode(chap['index']) + ' - ' + chap['name']
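
In other words, the adapter now keeps only chapters whose isAuth flag equals 1 instead of filtering on isVip. A toy illustration of the filter with invented chapterItems data (the field values below are made up, not taken from the webnovel.com API):

    volume = {'chapterItems': [
        {'index': 1, 'name': 'Prologue',  'isAuth': 1},
        {'index': 2, 'name': 'Paywalled', 'isAuth': 0},  # skipped: behind the ad wall
    ]}
    kept = [chap for chap in volume['chapterItems'] if chap['isAuth'] in [1]]
    print([chap['name'] for chap in kept])  # ['Prologue']
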
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/fanficfare/cli.py new/FanFicFare-3.7.6/fanficfare/cli.py
--- old/FanFicFare-3.7.0/fanficfare/cli.py      2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/fanficfare/cli.py      2019-04-24 05:29:17.000000000 +0200
@@ -39,7 +39,7 @@
     def pickle_load(f):
         return pickle.load(f,encoding="bytes")
 
-version="3.7.0"
+version="3.7.6"
 os.environ['CURRENT_VERSION_ID']=version
 
 global_cache = 'global_cache'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/fanficfare/configurable.py new/FanFicFare-3.7.6/fanficfare/configurable.py
--- old/FanFicFare-3.7.0/fanficfare/configurable.py     2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/fanficfare/configurable.py     2019-04-24 05:29:17.000000000 +0200
@@ -244,6 +244,8 @@
 
                'inject_chapter_title':(['asianfanfics.com'],None,boollist),
 
+               'auto_sub':(['asianfanfics.com'],None,boollist),
+
                # eFiction Base adapters allow bulk_load
                # kept forgetting to add them, so now it's automatic.
                'bulk_load':(adapters.get_bulk_load_sites(),
@@ -441,6 +443,7 @@
                  'conditionals_use_lists',
                  'description_in_chapter',
                  'inject_chapter_title',
+                 'auto_sub',
                  'titlepage_end',
                  'titlepage_entries',
                  'titlepage_entry',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/fanficfare/defaults.ini new/FanFicFare-3.7.6/fanficfare/defaults.ini
--- old/FanFicFare-3.7.0/fanficfare/defaults.ini        2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/fanficfare/defaults.ini        2019-04-24 05:29:17.000000000 +0200
@@ -2167,10 +2167,11 @@
 
 [www.asianfanfics.com]
 ## Unlike most sites, asianfanfics.com, instead of denying access to
-## 'adult' content, will censor the text of stories to remove the
-## 'adult' words.  FanFicFare cannot detect when this happens, but if
-## you set your username and password, FFF will log you in to prevent
-## that.
+## 'adult' or subscriber-only content, will censor the text of stories
+## to remove 'adult' words or entire portions of the text. This is why
+## an account is required to download stories from this site. It is
+## also strongly recommended to consider enabling auto_sub in order to
+## further avoid this from happening.
 #username:YourName
 #password:yourpassword
 
@@ -2197,6 +2198,14 @@
 ## chapter_start to remove the standard chapter title, as shown below.
 inject_chapter_title:false
 
+## This website removes certain HTML tags and portions of the story
+## from subscriber-only stories. It is strongly recommended to turn
+## this option on. This will automatically subscribe you to such
+## stories in order to acquire the unaltered text. You can
+## unsubscribe manually on the website after the story has been
+## downloaded.
+auto_sub:false
+
 [www.bdsmlibrary.com]
 ## Some sites also require the user to confirm they are adult for
 ## adult content.  Uncomment by removing '#' in front of is_adult.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/fanficfare/story.py new/FanFicFare-3.7.6/fanficfare/story.py
--- old/FanFicFare-3.7.0/fanficfare/story.py    2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/fanficfare/story.py    2019-04-24 05:29:17.000000000 +0200
@@ -703,7 +703,10 @@
             elif 'list' in tag['class']:
                 val = []
                 for i in tag.find_all('li'):
-                    val.append(i.string)
+                    # keeps &amp; but removes <li></li> because BS4
+                    # halps by converting NavigableString to string
+                    # (losing entities)
+                    val.append(unicode(i)[4:-5])
             elif 'int' in tag['class']:
                 # Python reports true when asked isinstance(<bool>, (int))
                 # bools now converted to unicode when set.
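
The odd-looking unicode(i)[4:-5] is there because serializing the whole <li> keeps HTML entities that .string would already have decoded, and the slice merely trims the '<li>' and '</li>' wrappers. A small demonstration with made-up list content (Python 3, so str() stands in for unicode()):

    from bs4 import BeautifulSoup

    li = BeautifulSoup('<li>Sturm &amp; Drang</li>', 'html.parser').find('li')
    print(li.string)      # Sturm & Drang      -- entity already decoded
    print(str(li)[4:-5])  # Sturm &amp; Drang  -- serialized form keeps the entity
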
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/setup.py new/FanFicFare-3.7.6/setup.py
--- old/FanFicFare-3.7.0/setup.py       2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/setup.py       2019-04-24 05:29:17.000000000 +0200
@@ -27,7 +27,7 @@
     name=package_name,
 
     # Versions should comply with PEP440.
-    version="3.7.0",
+    version="3.7.6",
 
     description='A tool for downloading fanfiction to eBook formats',
     long_description=long_description,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/webservice/app.yaml new/FanFicFare-3.7.6/webservice/app.yaml
--- old/FanFicFare-3.7.0/webservice/app.yaml    2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/webservice/app.yaml    2019-04-24 05:29:17.000000000 +0200
@@ -1,6 +1,6 @@
 # ffd-retief-hrd fanficfare
 application: fanficfare
-version: 3-7-0
+version: 3-7-6
 runtime: python27
 api_version: 1
 threadsafe: true
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.7.0/webservice/index.html new/FanFicFare-3.7.6/webservice/index.html
--- old/FanFicFare-3.7.0/webservice/index.html  2019-04-19 21:08:56.000000000 +0200
+++ new/FanFicFare-3.7.6/webservice/index.html  2019-04-24 05:29:17.000000000 +0200
@@ -84,7 +84,7 @@
             If you have any problems with this application, please
             report them in
             the <a href="https://groups.google.com/group/fanfic-downloader">FanFicFare Google Group</a>.  The
-            <a href="https://3-6-0.fanficfare.appspot.com">previous version</a>
+            <a href="https://3-7-0.fanficfare.appspot.com">previous version</a>
             is also available for you to use if necessary.
           </p>
           <div id='error'>

