Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-fanficfare for 
openSUSE:Factory checked in at 2024-10-02 21:34:35
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
 and      /work/SRC/openSUSE:Factory/.python-fanficfare.new.19354 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-fanficfare"

Wed Oct  2 21:34:35 2024 rev:61 rq:1205187 version:4.39.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes      2024-09-30 15:40:09.480414143 +0200
+++ /work/SRC/openSUSE:Factory/.python-fanficfare.new.19354/python-fanficfare.changes   2024-10-02 21:34:57.602311621 +0200
@@ -1,0 +2,13 @@
+Wed Oct  2 05:41:39 UTC 2024 - Matej Cepl <mc...@cepl.eu>
+
+- Update to 4.39.0:
+  - Better error when utf8FromSoup called with None.
+  - adapter_asianfanficscom: Add 'Is adult' toggle call,
+    use_cloudscraper:true in defaults.ini
+  - Fix(es) for get_url_search not found when seriesUrl doesn't
+    match an adapter site.
+  - Fix for regression when browser_cache_simple_header_old
+    added. #1104
+  - Add decode_emails option, defaults to false.
+
+-------------------------------------------------------------------

Old:
----
  FanFicFare-4.38.0.tar.gz

New:
----
  FanFicFare-4.39.0.tar.gz
  _scmsync.obsinfo
  build.specials.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.KruJcx/_old  2024-10-02 21:34:58.182335735 +0200
+++ /var/tmp/diff_new_pack.KruJcx/_new  2024-10-02 21:34:58.186335901 +0200
@@ -20,7 +20,7 @@
 %define modnamedown fanficfare
 %define skip_python2 1
 Name:           python-fanficfare
-Version:        4.38.0
+Version:        4.39.0
 Release:        0
 Summary:        Tool for making eBooks from stories on fanfiction and other web sites
 License:        GPL-3.0-only

++++++ FanFicFare-4.38.0.tar.gz -> FanFicFare-4.39.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/calibre-plugin/__init__.py 
new/FanFicFare-4.39.0/calibre-plugin/__init__.py
--- old/FanFicFare-4.38.0/calibre-plugin/__init__.py    2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/calibre-plugin/__init__.py    2024-10-02 
03:52:39.000000000 +0200
@@ -33,7 +33,7 @@
 from calibre.customize import InterfaceActionBase
 
 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 38, 0)
+__version__ = (4, 39, 0)
 
 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/calibre-plugin/fff_plugin.py 
new/FanFicFare-4.39.0/calibre-plugin/fff_plugin.py
--- old/FanFicFare-4.38.0/calibre-plugin/fff_plugin.py  2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/calibre-plugin/fff_plugin.py  2024-10-02 
03:52:39.000000000 +0200
@@ -1372,9 +1372,10 @@
         # logger.debug("search 
seriesUrl:%s"%self.do_id_search(story.getMetadata('seriesUrl')))
         if not bgmeta:
             series = story.getMetadata('series')
-            if not merge and series and prefs['checkforseriesurlid']:
+            seriesUrl = story.getMetadata('seriesUrl')
+            if not merge and series and seriesUrl and prefs['checkforseriesurlid']:
                 # try to find *series anthology* by *seriesUrl* identifier url or uri first.
-                identicalbooks = self.do_id_search(story.getMetadata('seriesUrl'))
+                identicalbooks = self.do_id_search(seriesUrl)
                 # print("identicalbooks:%s"%identicalbooks)
                 if len(identicalbooks) > 0 and \
                         (prefs['auto_reject_seriesurlid'] or
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/calibre-plugin/plugin-defaults.ini 
new/FanFicFare-4.39.0/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-4.38.0/calibre-plugin/plugin-defaults.ini    2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/calibre-plugin/plugin-defaults.ini    2024-10-02 
03:52:39.000000000 +0200
@@ -677,6 +677,10 @@
 ## single marks and is the recommended setting if you use it.
 #max_zalgo:1
 
+## Some sites use a common obfuscation of email addresses.  Set
+## decode_emails:true for FFF to attempt to decode them.
+decode_emails:false
+
 ## Apply adapter's normalize_chapterurl() to all links in chapter
 ## texts, if they match the known pattern(s) for chapter URLs.  As of
 ## writing, base_xenforoforum, adapter_archiveofourownorg &
@@ -3171,6 +3175,8 @@
 ## personal.ini, not defaults.ini.
 #is_adult:true
 
+use_cloudscraper:true
+
 ## Additional metadata entries.
 extra_valid_entries:tags,characters,upvotes,subscribers,views
 tags_label:Tags
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/fanficfare/adapters/__init__.py 
new/FanFicFare-4.39.0/fanficfare/adapters/__init__.py
--- old/FanFicFare-4.38.0/fanficfare/adapters/__init__.py       2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/adapters/__init__.py       2024-10-02 
03:52:39.000000000 +0200
@@ -237,7 +237,7 @@
     cls =  _get_class_for(url)[0]
     if not cls:
         ## still apply common processing.
-        cls = base_adapter
+        cls = base_adapter.BaseSiteAdapter
     return cls.get_url_search(url)
 
 def getAdapter(config,url,anyurl=False):
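
For context on the get_url_search fix above: get_url_search appears to be a classmethod defined on the BaseSiteAdapter class, while the old fallback assigned the base_adapter module itself, which has no such attribute, so a URL matching no adapter site failed. A minimal, hypothetical illustration of that distinction (stand-in code, not FanFicFare's):

    import types

    class BaseSiteAdapter(object):
        @classmethod
        def get_url_search(cls, url):
            # loosen the scheme so either http or https matches
            return url.replace('http:', 'https?:')

    # stand-in for the imported base_adapter module
    base_adapter = types.ModuleType('base_adapter')
    base_adapter.BaseSiteAdapter = BaseSiteAdapter

    print(base_adapter.BaseSiteAdapter.get_url_search('http://example.com/s/1'))
    # base_adapter.get_url_search(...) raises AttributeError -- the old fallback's failure mode
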
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.38.0/fanficfare/adapters/adapter_adastrafanficcom.py 
new/FanFicFare-4.39.0/fanficfare/adapters/adapter_adastrafanficcom.py
--- old/FanFicFare-4.38.0/fanficfare/adapters/adapter_adastrafanficcom.py       
2024-09-01 18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/adapters/adapter_adastrafanficcom.py       
2024-10-02 03:52:39.000000000 +0200
@@ -22,9 +22,9 @@
 from .base_otw_adapter import BaseOTWAdapter
 
 def getClass():
-    return SquidgeWorldOrgAdapter
+    return AdastrafanficComAdapter
 
-class SquidgeWorldOrgAdapter(BaseOTWAdapter):
+class AdastrafanficComAdapter(BaseOTWAdapter):
 
     def __init__(self, config, url):
         BaseOTWAdapter.__init__(self, config, url)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.38.0/fanficfare/adapters/adapter_asianfanficscom.py 
new/FanFicFare-4.39.0/fanficfare/adapters/adapter_asianfanficscom.py
--- old/FanFicFare-4.38.0/fanficfare/adapters/adapter_asianfanficscom.py        
2024-09-01 18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/adapters/adapter_asianfanficscom.py        
2024-10-02 03:52:39.000000000 +0200
@@ -252,6 +252,22 @@
 
         data = self.get_request(url)
         soup = self.make_soup(data)
+        # logger.debug(data)
+
+        ageform = soup.select_one('form[action="/account/toggle_age"]')
+        # logger.debug(ageform)
+        if ageform and (self.is_adult or self.getConfig("is_adult")):
+            params = {}
+            params['is_of_age']=ageform.select_one('input#is_of_age')['value']
+            params['current_url']=ageform.select_one('input#current_url')['value']
+            params['csrf_aff_token']=ageform.select_one('input[name="csrf_aff_token"]')['value']
+            loginUrl = 'https://' + self.getSiteDomain() + '/account/mark_over_18'
+            logger.info("Will now toggle age to URL (%s)" % (loginUrl))
+            # logger.debug(params)
+            data = self.post_request(loginUrl, params)
+            soup = self.make_soup(data)
+            # logger.debug(data)
+
         content = soup.find('div', {'id': 'user-submitted-body'})
 
         if self.getConfig('inject_chapter_title'):
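
The block added above follows a common scraping pattern: when the 'Is adult' gate form is present and adult content is allowed, the adapter collects the form's hidden fields, POSTs them to /account/mark_over_18, and parses the POST response as the unlocked page. A rough standalone sketch of the same idea with requests and BeautifulSoup (assumptions: the www.asianfanfics.com domain, a placeholder story URL, and a plain requests.Session standing in for FFF's get_request/post_request and cloudscraper plumbing):

    import requests
    from bs4 import BeautifulSoup

    chapter_url = 'https://www.asianfanfics.com/story/view/000000/1'  # placeholder URL
    session = requests.Session()
    soup = BeautifulSoup(session.get(chapter_url).text, 'html.parser')

    ageform = soup.select_one('form[action="/account/toggle_age"]')
    if ageform:
        params = {
            'is_of_age': ageform.select_one('input#is_of_age')['value'],
            'current_url': ageform.select_one('input#current_url')['value'],
            'csrf_aff_token': ageform.select_one('input[name="csrf_aff_token"]')['value'],
        }
        # toggle the age gate; the response body is the unlocked page
        data = session.post('https://www.asianfanfics.com/account/mark_over_18',
                            data=params).text
        soup = BeautifulSoup(data, 'html.parser')

    content = soup.find('div', {'id': 'user-submitted-body'})
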
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.38.0/fanficfare/adapters/adapter_storiesonlinenet.py 
new/FanFicFare-4.39.0/fanficfare/adapters/adapter_storiesonlinenet.py
--- old/FanFicFare-4.38.0/fanficfare/adapters/adapter_storiesonlinenet.py       
2024-09-01 18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/adapters/adapter_storiesonlinenet.py       
2024-10-02 03:52:39.000000000 +0200
@@ -229,7 +229,7 @@
         #    <a href="/s/00001/This-is-a-test/1">Chapter 1</a>
         #    <a href="/n/00001/This-is-a-test/1">Chapter 1</a>
         chapters = soup.select('div#index-list a[href*="/s/"],div#index-list a[href*="/n/"]')
-        logger.debug(chapters)
+        # logger.debug(chapters)
         if len(chapters) != 0:
             logger.debug("Number of chapters: {0}".format(len(chapters)))
             for chapter in chapters:
@@ -546,7 +546,7 @@
         srtag = soup.find('div', id='sr')
 
         if srtag != None:
-            logger.debug('Getting more chapter text for: %s' % url)
+            # logger.debug('Getting more chapter text for: %s' % url)
             moretext = self.getMoreText(html)
             if moretext != None:
                 moresoup = self.make_soup(moretext)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.38.0/fanficfare/adapters/base_adapter.py 
new/FanFicFare-4.39.0/fanficfare/adapters/base_adapter.py
--- old/FanFicFare-4.38.0/fanficfare/adapters/base_adapter.py   2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/adapters/base_adapter.py   2024-10-02 
03:52:39.000000000 +0200
@@ -39,7 +39,7 @@
 
 from ..story import Story
 from ..requestable import Requestable
-from ..htmlcleanup import stripHTML
+from ..htmlcleanup import stripHTML, decode_email
 from ..exceptions import InvalidStoryURL, StoryDoesNotExist, HTTPErrorFFF
 
 # was defined here before, imported for all the adapters that still
@@ -143,9 +143,9 @@
         '''
         # older idents can be uri vs url and have | instead of : after
         # http, plus many sites are now switching to https.
-        logger.debug(url)
+        # logger.debug(url)
         regexp = r'identifiers:"~ur(i|l):~^https?%s$"'%(re.sub(r'^https?','',re.escape(url)))
-        logger.debug(regexp)
+        # logger.debug(regexp)
         return regexp
 
     def _setURL(self,url):
@@ -634,6 +634,8 @@
                                # image problems when same chapter URL
                                # included more than once (base_xenforo
                                # always_include_first_post setting)
+        if not soup:
+            raise TypeError("utf8FromSoup called with soup (%s)"%soup)
         self.times.add("utf8FromSoup->copy", datetime.now() - start)
         ## _do_utf8FromSoup broken out to separate copy & timing and
         ## allow for inherit override.
@@ -657,6 +659,32 @@
         if not fetch:
             fetch=self.get_request_raw
 
+        if self.getConfig("decode_emails"):
+            # <a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="c7ada8afa9a3a8a287a2aaa6aeabe9a4a8aa">[email&#160;protected]</a>
+            # <a href="/cdn-cgi/l/email-protection#e3a18f8a8d87ae8c969086d2d7d0a3b3abac8d869790cd8c9184"><span class="__cf_email__" data-cfemail="296b4540474d64465c5a4c181d1a69796166474c5d5a07465b4e">[email&#160;protected]</span></a>
+            for emailtag in soup.select('a.__cf_email__') + soup.select('span.__cf_email__'):
+                tagtext = '(tagtext not set yet)'
+                try:
+                    tagtext = unicode(emailtag)
+                    emaildata = emailtag['data-cfemail']
+                    if not emaildata:
+                        continue
+                    addr = decode_email(emaildata)
+                    repltag = emailtag
+                    if( emailtag.name == 'span' and
+                        emailtag.parent.name == 'a' and
+                        emailtag.parent['href'].startswith('/cdn-cgi/l/email-protection') ):
+                        repltag = emailtag.parent
+                    repltag.name='span'
+                    if repltag.has_attr('href'):
+                        del repltag['href']
+                    repltag['class']='decoded_email'
+                    repltag.string = addr
+                except Exception as e:
+                    logger.info("decode_emails failed on (%s)"%tagtext)
+                    logger.info(e)
+                    logger.debug(traceback.format_exc())
+
         acceptable_attributes = self.getConfigList('keep_html_attrs',['href','name','class','id','data-orighref'])
 
         if self.getConfig("keep_style_attr"):
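
To make the new decode_emails pass concrete, here is a self-contained sketch of the same tag rewrite on a toy snippet (assumptions: bs4 is available, and decode_cfemail is a condensed stand-in for the decode_email helper added to htmlcleanup.py further down, minus its unquote/escape steps):

    from bs4 import BeautifulSoup

    def decode_cfemail(data):
        # the first hex byte is an XOR key applied to every following byte
        key = int(data[:2], 16)
        return ''.join(chr(int(data[i:i+2], 16) ^ key)
                       for i in range(2, len(data), 2))

    html = ('<p>Contact <a href="/cdn-cgi/l/email-protection" class="__cf_email__" '
            'data-cfemail="422302206c21">[email&#160;protected]</a></p>')
    soup = BeautifulSoup(html, 'html.parser')
    for tag in soup.select('a.__cf_email__, span.__cf_email__'):
        addr = decode_cfemail(tag['data-cfemail'])
        tag.name = 'span'              # keep the element, drop the dead link
        del tag['href']
        tag['class'] = 'decoded_email'
        tag.string = addr
    print(soup)  # <p>Contact <span class="decoded_email">a@b.c</span></p>

The real code above additionally handles the case where the protected span sits inside an <a href="/cdn-cgi/l/email-protection"> wrapper, replacing the whole link rather than just the inner span.
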
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.38.0/fanficfare/adapters/base_xenforoforum_adapter.py 
new/FanFicFare-4.39.0/fanficfare/adapters/base_xenforoforum_adapter.py
--- old/FanFicFare-4.38.0/fanficfare/adapters/base_xenforoforum_adapter.py      
2024-09-01 18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/adapters/base_xenforoforum_adapter.py      
2024-10-02 03:52:39.000000000 +0200
@@ -196,7 +196,7 @@
         # https://forums.sufficientvelocity.com/posts/10232301/
         regexp = re.sub(r"^(?P<keep>.*(\\\?|/)(threads|posts)).*(?P<delimiter>\\\.|/)(?P<id>\d+)/",
                         r"\g<keep>.*(\\.|/)\g<id>/",regexp)
-        logger.debug(regexp)
+        # logger.debug(regexp)
         return regexp
 
     def performLogin(self,data):
@@ -612,10 +612,10 @@
                     tstr = title(tstr)
                 if tagcat:
                     tagname = tagmap[tagcat['title']]
-                    logger.debug("Forum Tag(%s) Cat(%s) list(%s)"%(stripHTML(tag),tagcat['title'],tagname))
+                    # logger.debug("Forum Tag(%s) Cat(%s) list(%s)"%(stripHTML(tag),tagcat['title'],tagname))
                     self.story.addToList(tagname,tstr)
-                else:
-                    logger.debug("Forum Tag(%s) Uncategorized"%stripHTML(tag))
+                # else:
+                #     logger.debug("Forum Tag(%s) Uncategorized"%stripHTML(tag))
                 self.story.addToList('forumtags',tstr)
 
         # author moved down here to take from post URLs.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.38.0/fanficfare/browsercache/__init__.py 
new/FanFicFare-4.39.0/fanficfare/browsercache/__init__.py
--- old/FanFicFare-4.38.0/fanficfare/browsercache/__init__.py   2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/browsercache/__init__.py   2024-10-02 
03:52:39.000000000 +0200
@@ -17,7 +17,7 @@
 
 import os
 from ..exceptions import BrowserCacheException
-from .base_browsercache import BaseBrowserCache
+from .base_browsercache import BaseBrowserCache, CACHE_DIR_CONFIG
 ## SimpleCache and BlockfileCache are both flavors of cache used by Chrome.
 from .browsercache_simple import SimpleCache
 from .browsercache_blockfile import BlockfileCache
@@ -40,8 +40,8 @@
             if self.browser_cache_impl is not None:
                 break
         if self.browser_cache_impl is None:
-            raise BrowserCacheException("Directory does not contain a known browser cache type: '%s'"%
-                                        os.path.abspath(cache_dir))
+            raise BrowserCacheException("%s is not set, or directory does not contain a known browser cache type: '%s'"%
+                                        (CACHE_DIR_CONFIG,getConfig_fn(CACHE_DIR_CONFIG)))
 
     def get_data(self, url):
         # logger.debug("get_data:%s"%url)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/fanficfare/cli.py 
new/FanFicFare-4.39.0/fanficfare/cli.py
--- old/FanFicFare-4.38.0/fanficfare/cli.py     2024-09-01 18:36:55.000000000 
+0200
+++ new/FanFicFare-4.39.0/fanficfare/cli.py     2024-10-02 03:52:39.000000000 
+0200
@@ -28,7 +28,7 @@
 import os, sys, platform
 
 
-version="4.38.0"
+version="4.39.0"
 os.environ['CURRENT_VERSION_ID']=version
 
 global_cache = 'global_cache'
@@ -511,6 +511,8 @@
                 if not options.nometachapters:
                     metadata['zchapters'] = []
                     for i, chap in enumerate(adapter.get_chapters()):
+                        # apply replace_chapter_text to chapter title.
+                        chap['title']=adapter.story.do_chapter_text_replacements(chap['title'])
                         metadata['zchapters'].append((i+1,chap))
                 else:
                     # If no chapters, also suppress output_css so
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/fanficfare/configurable.py 
new/FanFicFare-4.39.0/fanficfare/configurable.py
--- old/FanFicFare-4.38.0/fanficfare/configurable.py    2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/configurable.py    2024-10-02 
03:52:39.000000000 +0200
@@ -205,6 +205,7 @@
                'remove_class_chapter':(None,None,boollist),
                'mark_new_chapters':(None,None,boollist+['latestonly']),
                'titlepage_use_table':(None,None,boollist),
+               'decode_emails':(None,None,boollist),
 
                'use_ssl_unverified_context':(None,None,boollist),
                'use_ssl_default_seclevelone':(None,None,boollist),
@@ -584,6 +585,7 @@
                  'show_nsfw_cover_images',
                  'show_spoiler_tags',
                  'max_zalgo',
+                 'decode_emails',
                  'epub_version',
                  'prepend_section_titles',
                  ])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/fanficfare/defaults.ini 
new/FanFicFare-4.39.0/fanficfare/defaults.ini
--- old/FanFicFare-4.38.0/fanficfare/defaults.ini       2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/defaults.ini       2024-10-02 
03:52:39.000000000 +0200
@@ -678,6 +678,10 @@
 ## single marks and is the recommended setting if you use it.
 #max_zalgo:1
 
+## Some sites use a common obfuscation of email addresses.  Set
+## decode_emails:true for FFF to attempt to decode them.
+decode_emails:false
+
 ## Apply adapter's normalize_chapterurl() to all links in chapter
 ## texts, if they match the known pattern(s) for chapter URLs.  As of
 ## writing, base_xenforoforum, adapter_archiveofourownorg &
@@ -3166,6 +3170,8 @@
 ## personal.ini, not defaults.ini.
 #is_adult:true
 
+use_cloudscraper:true
+
 ## Additional metadata entries.
 extra_valid_entries:tags,characters,upvotes,subscribers,views
 tags_label:Tags
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/fanficfare/htmlcleanup.py 
new/FanFicFare-4.39.0/fanficfare/htmlcleanup.py
--- old/FanFicFare-4.38.0/fanficfare/htmlcleanup.py     2024-09-01 
18:36:55.000000000 +0200
+++ new/FanFicFare-4.39.0/fanficfare/htmlcleanup.py     2024-10-02 
03:52:39.000000000 +0200
@@ -22,10 +22,16 @@
 import re
 
 # py2 vs py3 transition
+from .six.moves.urllib.parse import unquote
 from .six import text_type as unicode
 from .six import string_types as basestring
 from .six import ensure_text
 from .six import unichr
+from .six import PY2
+if PY2:
+    from cgi import escape as htmlescape
+else: # PY3
+    from html import escape as htmlescape
 
 def _unirepl(match):
     "Return the unicode string for a decimal number"
@@ -179,6 +185,19 @@
             count+=1
     return ''.join(lineout)
 
+def parse_hex(n, c):
+    r = n[c:c+2]
+    return int(r, 16)
+
+def decode_email(n, c=0):
+    o = ""
+    a = parse_hex(n, c)
+    for i in range(c + 2, len(n), 2):
+        l = parse_hex(n, i) ^ a
+        o += chr(l)
+    o = unquote(o)
+    return htmlescape(o)
+
 # entity list from http://code.google.com/p/doctype/wiki/CharacterEntitiesConsistent
 entities = { '&aacute;' : 'á',
          '&Aacute;' : 'Á',
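
For the curious, the encoding decode_email() reverses is just that XOR scheme: the first hex byte is the key, each later byte XORed with it yields one character, and the result is then URL-unquoted and HTML-escaped. A worked example with a made-up payload:

    # "422302206c21": key = 0x42, then
    #   0x23 ^ 0x42 = 0x61 'a'
    #   0x02 ^ 0x42 = 0x40 '@'
    #   0x20 ^ 0x42 = 0x62 'b'
    #   0x6c ^ 0x42 = 0x2e '.'
    #   0x21 ^ 0x42 = 0x63 'c'
    from fanficfare.htmlcleanup import decode_email  # added in 4.39.0
    assert decode_email("422302206c21") == "a@b.c"
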
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.38.0/pyproject.toml 
new/FanFicFare-4.39.0/pyproject.toml
--- old/FanFicFare-4.38.0/pyproject.toml        2024-09-01 18:36:55.000000000 
+0200
+++ new/FanFicFare-4.39.0/pyproject.toml        2024-10-02 03:52:39.000000000 
+0200
@@ -16,7 +16,7 @@
 #
 # For a discussion on single-sourcing the version, see
 # https://packaging.python.org/guides/single-sourcing-package-version/
-version = "4.38.0"
+version = "4.39.0"
 
 # This is a one-line description or tagline of what your project does. This
 # corresponds to the "Summary" metadata field:

++++++ _scmsync.obsinfo ++++++
mtime: 1727848870
commit: 9805c21edd3aac965b1e87d9a381126da9bff77cedac9406099d7e4e9c92da84
url: https://src.opensuse.org/mcepl_pkgs/python-fanficfare.git
revision: 9805c21edd3aac965b1e87d9a381126da9bff77cedac9406099d7e4e9c92da84
