Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-fanficfare for openSUSE:Factory checked in at 2025-07-06 17:13:45
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
 and      /work/SRC/openSUSE:Factory/.python-fanficfare.new.1903 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-fanficfare"

Sun Jul  6 17:13:45 2025 rev:69 rq:1290547 version:4.47.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes      2025-06-10 09:11:02.405709379 +0200
+++ /work/SRC/openSUSE:Factory/.python-fanficfare.new.1903/python-fanficfare.changes     2025-07-06 17:17:45.767413990 +0200
@@ -1,0 +2,30 @@
+Thu Jul  3 20:33:09 UTC 2025 - Matej Cepl <mc...@cepl.eu>
+
+- Update to 4.47.0:
+  - adapter_ashwindersycophanthexcom: http to https
+  - Plugin BG Jobs: Remove old multi-process code
+  - Report BG job failed entirely as individual books failed
+    instead of just exception. For #1225
+  - adapter_fimfictionnet: New img attr and class. #1226
+  - Send refresh_screen=True when updating Reading Lists in case
+    of series column updates.
+  - Add SB favicons to cover_exclusion_regexp.
+  - Support for logging into royal road to keep chapter progress
+    (and count as page views), #1222, thanks snoonan.
+  - Fix images from existing epub being discarded during update.
+  - Change default base_xenforoforum minimum_threadmarks:1. See
+    #1218
+  - Shutdown IMAP connection when done with it.
+  - Mildly kludgey fix for status bar notifications.
+  - PI BG Jobs: Fix split without reconsolidate.
+  - Py2 fix for split BG jobs, closes #1214
+  - Fix xenforo2 prefixtags, some still using tags in title
+  - alternatehistory needs at least cloudscraper now, it seems.
+  - Add use_flaresolverr_session and flaresolverr_session
+    settings for #1211
+  - Include Accept:image/* header when requesting an image url,
+    thanks bellisk
+  - Skip OTW(AO3) login when open_pages_in_browser AND
+    use_browser_cache AND use_browser_cache_only
+
+-------------------------------------------------------------------

Old:
----
  FanFicFare-4.46.0.tar.gz

New:
----
  FanFicFare-4.47.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.q8SQDy/_old  2025-07-06 17:17:46.443441945 +0200
+++ /var/tmp/diff_new_pack.q8SQDy/_new  2025-07-06 17:17:46.443441945 +0200
@@ -20,7 +20,7 @@
 %define modnamedown fanficfare
 %define skip_python2 1
 Name:           python-fanficfare
-Version:        4.46.0
+Version:        4.47.0
 Release:        0
Summary:        Tool for making eBooks from stories on fanfiction and other web sites
 License:        GPL-3.0-only

++++++ FanFicFare-4.46.0.tar.gz -> FanFicFare-4.47.0.tar.gz ++++++
++++ 4109 lines of diff (skipped)
++++    retrying with extended exclude list
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/calibre-plugin/__init__.py new/FanFicFare-4.47.0/calibre-plugin/__init__.py
--- old/FanFicFare-4.46.0/calibre-plugin/__init__.py    2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/calibre-plugin/__init__.py    2025-07-03 15:21:33.000000000 +0200
@@ -33,7 +33,7 @@
 from calibre.customize import InterfaceActionBase
 
 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 46, 0)
+__version__ = (4, 47, 0)
 
 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/calibre-plugin/config.py new/FanFicFare-4.47.0/calibre-plugin/config.py
--- old/FanFicFare-4.46.0/calibre-plugin/config.py      2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/calibre-plugin/config.py      2025-07-03 15:21:33.000000000 +0200
@@ -417,7 +417,6 @@
             prefs['update_existing_only_from_email'] = self.imap_tab.update_existing_only_from_email.isChecked()
             prefs['download_from_email_immediately'] = self.imap_tab.download_from_email_immediately.isChecked()
 
-            prefs['single_proc_jobs'] = self.other_tab.single_proc_jobs.isChecked()
             prefs['site_split_jobs'] = self.other_tab.site_split_jobs.isChecked()
             prefs['reconsolidate_jobs'] = self.other_tab.reconsolidate_jobs.isChecked()
 
@@ -1309,11 +1308,6 @@
         label.setWordWrap(True)
         groupl.addWidget(label)
 
-        self.single_proc_jobs = QCheckBox(_('Use new, single process background jobs'),self)
-        self.single_proc_jobs.setToolTip(_("Uncheck to go back to old multi-process BG jobs."))
-        self.single_proc_jobs.setChecked(prefs['single_proc_jobs'])
-        groupl.addWidget(self.single_proc_jobs)
-
         label = QLabel("<p>"+
                        _("Options with the new version:")+
                        "<ul>"+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/calibre-plugin/fff_plugin.py new/FanFicFare-4.47.0/calibre-plugin/fff_plugin.py
--- old/FanFicFare-4.46.0/calibre-plugin/fff_plugin.py  2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/calibre-plugin/fff_plugin.py  2025-07-03 15:21:33.000000000 +0200
@@ -39,7 +39,7 @@
 import traceback
 from collections import defaultdict
 
-from PyQt5.Qt import (QApplication, QMenu, QTimer, QToolButton, pyqtSignal)
+from PyQt5.Qt import (QApplication, QMenu, QTimer, QToolButton, pyqtSignal, QEventLoop)
 
 from calibre.ptempfile import PersistentTemporaryFile, PersistentTemporaryDirectory, remove_dir
 from calibre.ebooks.metadata import MetaInformation
@@ -541,11 +541,11 @@
     def update_lists(self,checked,add=True):
         if prefs['addtolists'] or prefs['addtoreadlists']:
             if not self.is_library_view():
-                self.gui.status_bar.show_message(_('Cannot Update Reading Lists from Device View'), 3000)
+                self.do_status_message(_('Cannot Update Reading Lists from Device View'), 3000)
                 return
 
             if len(self.gui.library_view.get_selected_ids()) == 0:
-                self.gui.status_bar.show_message(_('No Selected Books to Update Reading Lists'), 3000)
+                self.do_status_message(_('No Selected Books to Update Reading Lists'), 3000)
                 return
 
             self.update_reading_lists(self.gui.library_view.get_selected_ids(),add)
@@ -590,7 +590,7 @@
 
         try:
             with busy_cursor():
-                self.gui.status_bar.show_message(_('Fetching Story URLs from Email...'),6000)
+                self.do_status_message(_('Fetching Story URLs from Email...'),1000)
                 url_list = get_urls_from_imap(prefs['imapserver'],
                                               prefs['imapuser'],
                                               imap_pass,
@@ -634,7 +634,7 @@
                notupdate_list = set([x for x in url_list if not self.do_id_search(adapters.getNormalStoryURL(x))])
             url_list = url_list - notupdate_list
 
-            self.gui.status_bar.show_message(_('No Valid Story URLs Found in Unread Emails.'),3000)
+            self.do_status_message(_('No Valid Story URLs Found in Unread Emails.'),3000)
 
         if prefs['download_from_email_immediately']:
             ## do imap fetch w/o GUI elements
@@ -650,7 +650,7 @@
                         'add_tag':prefs['imaptags'],
                         },"\n".join(url_list))
             else:
-                self.gui.status_bar.show_message(_('Finished Fetching Story URLs from Email.'),3000)
+                self.do_status_message(_('Finished Fetching Story URLs from Email.'),3000)
 
         else:
             if url_list:
@@ -707,12 +707,12 @@
                 return
 
         with busy_cursor():
-            self.gui.status_bar.show_message(_('Fetching Story URLs from Page...'))
+            self.do_status_message(_('Fetching Story URLs from Page...'))
 
             frompage = self.get_urls_from_page(url)
             url_list = frompage.get('urllist',[])
 
-            self.gui.status_bar.show_message(_('Finished Fetching Story URLs from Page.'),3000)
+            self.do_status_message(_('Finished Fetching Story URLs from Page.'),3000)
 
         if url_list:
             # make a copy before adding to avoid changing passed param
@@ -737,7 +737,7 @@
     def list_story_urls(self,checked):
         '''Get list of URLs from existing books.'''
         if not self.gui.current_view().selectionModel().selectedRows() :
-            self.gui.status_bar.show_message(_('No Selected Books to Get URLs From'),
+            self.do_status_message(_('No Selected Books to Get URLs From'),
                                              3000)
             return
 
@@ -784,12 +784,12 @@
     def unnew_books(self,checked):
         '''Get list of URLs from existing books.'''
         if not self.is_library_view():
-            self.gui.status_bar.show_message(_('Can only UnNew books in library'),
+            self.do_status_message(_('Can only UnNew books in library'),
                                              3000)
             return
 
         if not self.gui.current_view().selectionModel().selectedRows() :
-            self.gui.status_bar.show_message(_('No Selected Books to Get URLs From'),
+            self.do_status_message(_('No Selected Books to Get URLs From'),
             return
 
@@ -850,7 +850,7 @@
             changed_ids = [ x['calibre_id'] for x in book_list if x['changed'] ]
             if changed_ids:
                 logger.debug(_('Starting auto conversion of %d books.')%(len(changed_ids)))
-                self.gui.status_bar.show_message(_('Starting auto conversion of %d books.')%(len(changed_ids)), 3000)
+                self.do_status_message(_('Starting auto conversion of %d books.')%(len(changed_ids)), 3000)
                 self.gui.iactions['Convert Books'].auto_convert_auto_add(changed_ids)
 
     def reject_list_urls(self,checked):
@@ -865,7 +865,7 @@
             book_list = [ self.make_book_from_device_row(x) for x in rows ]
 
         if len(book_list) == 0 :
-            self.gui.status_bar.show_message(_('No Selected Books have URLs to Reject'), 3000)
+            self.do_status_message(_('No Selected Books have URLs to Reject'), 3000)
             return
 
         # Progbar because fetching urls from device epubs can be slow.
@@ -941,15 +941,15 @@
     def update_anthology(self,checked,extraoptions={}):
         self.check_valid_collision(extraoptions)
         if not self.get_epubmerge_plugin():
-            self.gui.status_bar.show_message(_('Cannot Make Anthologys without %s')%'EpubMerge 1.3.1+', 3000)
+            self.do_status_message(_('Cannot Make Anthologys without %s')%'EpubMerge 1.3.1+', 3000)
             return
 
         if not self.is_library_view():
-            self.gui.status_bar.show_message(_('Cannot Update Books from Device View'), 3000)
+            self.do_status_message(_('Cannot Update Books from Device View'), 3000)
             return
 
         if len(self.gui.library_view.get_selected_ids()) != 1:
-            self.gui.status_bar.show_message(_('Can only update 1 anthology at a time'), 3000)
+            self.do_status_message(_('Can only update 1 anthology at a time'), 3000)
             return
 
         db = self.gui.current_db
@@ -959,13 +959,13 @@
 
         try:
             with busy_cursor():
-                self.gui.status_bar.show_message(_('Fetching Story URLs for Series...'))
+                self.do_status_message(_('Fetching Story URLs for Series...'))
                 book_id = self.gui.library_view.get_selected_ids()[0]
                 mergebook = self.make_book_id_only(book_id)
                 self.populate_book_from_calibre_id(mergebook, db)
 
                 if not db.has_format(book_id,'EPUB',index_is_id=True):
-                    self.gui.status_bar.show_message(_('Can only Update Epub Anthologies'), 3000)
+                    self.do_status_message(_('Can only Update Epub Anthologies'), 3000)
                     return
 
                 tdir = PersistentTemporaryDirectory(prefix='fff_anthology_')
@@ -996,7 +996,7 @@
 
                 url_list_text = "\n".join(url_list)
 
-                self.gui.status_bar.show_message(_('Finished Fetching Story URLs for Series.'),3000)
+                self.do_status_message(_('Finished Fetching Story URLs for Series.'),3000)
         except NotAnthologyException:
             # using an exception purely to get outside 'with busy_cursor:'
             info_dialog(self.gui, _("Cannot Update Anthology"),
@@ -1071,14 +1071,14 @@
 
     def update_dialog(self,checked,id_list=None,extraoptions={}):
         if not self.is_library_view():
-            self.gui.status_bar.show_message(_('Cannot Update Books from Device View'), 3000)
+            self.do_status_message(_('Cannot Update Books from Device View'), 3000)
             return
 
         if not id_list:
             id_list = self.gui.library_view.get_selected_ids()
 
         if len(id_list) == 0:
-            self.gui.status_bar.show_message(_('No Selected Books to Update'), 3000)
+            self.do_status_message(_('No Selected Books to Update'), 3000)
             return
 
         self.check_valid_collision(extraoptions)
@@ -1183,7 +1183,7 @@
                 win_title=_("Downloading metadata for stories")
                 status_prefix=_("Fetched metadata for")
 
-            self.gui.status_bar.show_message(status_bar, 3000)
+            self.do_status_message(status_bar, 3000)
             LoopProgressDialog(self.gui,
                                books,
                               partial(self.prep_download_loop, options = options, merge=merge),
@@ -1192,7 +1192,7 @@
                                win_title=win_title,
                                status_prefix=status_prefix)
         else:
-            self.gui.status_bar.show_message(_('No valid story URLs entered.'), 3000)
+            self.do_status_message(_('No valid story URLs entered.'), 3000)
         # LoopProgressDialog calls prep_download_loop for each 'good' story,
        # prep_download_loop updates book object for each with metadata from site,
         # LoopProgressDialog calls start_download_job at the end which goes
@@ -1810,15 +1810,9 @@
         # get libs from plugin zip.
         options['plugin_path'] = self.interface_action_base_plugin.plugin_path
 
-        if prefs['single_proc_jobs']: ## YYY Single BG job
-            args = ['calibre_plugins.fanficfare_plugin.jobs',
-                    'do_download_worker_single',
-                    (site, book_list, options, merge)]
-        else: ## MultiBG Job split by site
-            cpus = self.gui.job_manager.server.pool_size
-            args = ['calibre_plugins.fanficfare_plugin.jobs',
-                    'do_download_worker_multiproc',
-                    (site, book_list, options, cpus, merge)]
+        args = ['calibre_plugins.fanficfare_plugin.jobs',
+                'do_download_worker_single',
+                (site, book_list, options, merge)]
         if site:
            desc = _('Download %s FanFiction Book(s) for %s') % (sum(1 for x in book_list if x['good']),site)
         else:
@@ -1833,12 +1827,13 @@
         self.download_job_manager.get_batch(options['tdir']).add_job(site,job)
         job.tdir=options['tdir']
         job.site=site
+        job.orig_book_list = book_list
         # set as part of job, otherwise *changing* reconsolidate_jobs
         # after launch could cause job results to be ignored.
         job.reconsolidate=prefs['reconsolidate_jobs']  # YYY batch update
 
         self.gui.jobs_pointer.start()
-        self.gui.status_bar.show_message(_('Starting %d FanFicFare Downloads')%len(book_list),3000)
+        self.do_status_message(_('Starting %d FanFicFare Downloads')%len(book_list),3000)
 
     def do_mark_series_anthologies(self,mark_anthology_ids):
         if prefs['mark_series_anthologies'] and mark_anthology_ids:
@@ -1983,7 +1978,7 @@
                 self.gui.library_view.sort_by_named_field('marked', True)
 
         logger.debug(_('Finished Adding/Updating %d books.')%(len(update_list) + len(add_list)))
-        self.gui.status_bar.show_message(_('Finished Adding/Updating %d books.')%(len(update_list) + len(add_list)), 3000)
+        self.do_status_message(_('Finished Adding/Updating %d books.')%(len(update_list) + len(add_list)), 3000)
         batch = self.download_job_manager.get_batch(options['tdir'])
         batch.finish_job(options.get('site',None))
         if batch.all_done():
@@ -2019,7 +2014,7 @@
 
         if prefs['autoconvert'] and all_not_calonly_ids:
            logger.debug(_('Starting auto conversion of %d books.')%(len(all_ids)))
-            self.gui.status_bar.show_message(_('Starting auto conversion of %d books.')%(len(all_ids)), 3000)
+            self.do_status_message(_('Starting auto conversion of %d books.')%(len(all_ids)), 3000)
             self.gui.iactions['Convert Books'].auto_convert_auto_add(all_not_calonly_ids)
 
     def download_list_completed(self, job, options={},merge=False):
@@ -2027,10 +2022,27 @@
         site = job.site
         logger.debug("Batch Job:%s %s"%(tdir,site))
         batch = self.download_job_manager.get_batch(tdir)
-        batch.finish_job(site)
+
         if job.failed:
-            self.gui.job_exception(job, dialog_title='Failed to Download Stories')
-            return
+            # logger.debug(job.orig_book_list)
+            ## I don't *think* there would be any harm to modifying
+            ## the original book list, but I elect not to chance it.
+            failedjobresult = copy.deepcopy(job.orig_book_list)
+            for x in failedjobresult:
+                if x['good']:
+                    ## may have failed before reaching BG job.
+                    x['good'] = False
+                    x['status'] = _('Error')
+                    x['added'] = False
+                    x['reportorder'] = x['listorder']+10000000 # force to end.
+                    x['comment'] = _('Background Job Failed, see Calibre Jobs log.')
+                    x['showerror'] = True
+            self.gui.job_exception(job, dialog_title=_('Background Job Failed to Download Stories for (%s)')%job.site)
+            job.result = failedjobresult
+
+        if job.reconsolidate: # YYY batch update
+            logger.debug("batch.finish_job(%s)"%site)
+            batch.finish_job(site)
 
         showsite = None
         # set as part of job, otherwise *changing* reconsolidate_jobs
@@ -2040,7 +2052,7 @@
                 book_list = batch.get_results()
             else:
                 return
-        elif not job.failed:
+        else:
             showsite = site
             book_list = job.result
 
@@ -2053,13 +2065,11 @@
         good_list = [ x for x in book_list if x['good'] ]
         bad_list = [ x for x in book_list if not x['good'] ]
         chapter_error_list = [ x for x in book_list if 'chapter_error_count' in  x ]
-        try:
-            good_list = sorted(good_list,key=lambda x : x['reportorder'])
-            bad_list = sorted(bad_list,key=lambda x : x['reportorder'])
-        except KeyError:
-            good_list = sorted(good_list,key=lambda x : x['listorder'])
-            bad_list = sorted(bad_list,key=lambda x : x['listorder'])
-        #print("book_list:%s"%book_list)
+
+        sort_func = lambda x : x.get('reportorder',x['listorder'])
+        good_list = sorted(good_list,key=sort_func)
+        bad_list = sorted(bad_list,key=sort_func)
+
         payload = (good_list, bad_list, options)
 
        msgl = [ _('FanFicFare found <b>%s</b> good and <b>%s</b> bad updates.')%(len(good_list),len(bad_list)) ]
@@ -2122,6 +2132,15 @@
                                  htmllog,
                                  msgl)
 
+    def do_status_message(self,message,timeout=0):
+        self.gui.status_bar.show_message(message,timeout)
+        try:
+            QApplication.processEvents(QEventLoop.ProcessEventsFlag.ExcludeUserInputEvents)
+        except:
+            ## older versions of qt don't have ExcludeUserInputEvents.
+            ## but they also don't need the processEvents() call
+            pass
+
     def do_proceed_question(self, update_func, payload, htmllog, msgl):
         msg = '<p>'+'</p>\n<p>'.join(msgl)+ '</p>\n'
         def proceed_func(*args, **kwargs):
@@ -2149,7 +2168,7 @@
             good_list = sorted(good_list,key=lambda x : x['listorder'])
             bad_list = sorted(bad_list,key=lambda x : x['listorder'])
 
-            self.gui.status_bar.show_message(_('Merging %s books.')%total_good)
+            self.do_status_message(_('Merging %s books.')%total_good)
 
             existingbook = None
             if 'mergebook' in options:
@@ -2244,7 +2263,7 @@
         good_list = sorted(good_list,key=lambda x : x['listorder'])
         bad_list = sorted(bad_list,key=lambda x : x['listorder'])
 
-        self.gui.status_bar.show_message(_('FanFicFare Adding/Updating books.'))
+        self.do_status_message(_('FanFicFare Adding/Updating books.'))
         errorcol_label = self.get_custom_col_label(prefs['errorcol'])
         lastcheckedcol_label = self.get_custom_col_label(prefs['lastcheckedcol'])
 
@@ -2742,7 +2761,7 @@
                     addremovefunc(l,
                                   book_ids,
                                   display_warnings=False,
-                                  refresh_screen=False)
+                                  refresh_screen=True)
                 else:
                     if l != '':
                        message="<p>"+_("You configured FanFicFare to automatically update Reading List '%s', but you don't have a list of that name?")%l+"</p>"
@@ -2761,7 +2780,7 @@
                                                 #add_book_ids,
                                                 book_ids,
                                                 display_warnings=False,
-                                                refresh_screen=False)
+                                                refresh_screen=True)
                 else:
                     if l != '':
                        message="<p>"+_("You configured FanFicFare to automatically update Reading List '%s', but you don't have a list of that name?")%l+"</p>"
@@ -3208,7 +3227,6 @@
                           for k, v in d.items()])
     return "%s%s"%(kindent, d)
 
-from collections.abc import Iterable   # import directly from collections for Python < 3.3
 class DownloadBatch():
     def __init__(self,tdir=None):
         self.runningjobs = dict() # keyed by site
@@ -3232,7 +3250,12 @@
         retlist = []
         for j in self.jobsorder:
             ## failed / no result
-            if isinstance(j.result, Iterable):
+            try:
+                iter(j.result)
+            except TypeError:
+                # not iterable  abc.Iterable only in newer pythons
+                pass
+            else:
                 retlist.extend(j.result)
         return retlist
 
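The status-bar change above reduces to one helper. A minimal PyQt5 sketch of the pattern (status_bar stands in for calibre's status bar object, whose show_message() API is assumed from the diff):

    from PyQt5.Qt import QApplication, QEventLoop

    def do_status_message(status_bar, message, timeout=0):
        # Show the message, then pump the event loop (ignoring user input)
        # so the status bar repaints even while long-running work is
        # blocking the GUI thread.
        status_bar.show_message(message, timeout)
        try:
            QApplication.processEvents(
                QEventLoop.ProcessEventsFlag.ExcludeUserInputEvents)
        except AttributeError:
            # Older Qt versions lack ExcludeUserInputEvents, but they
            # also repaint without the explicit processEvents() call.
            pass
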
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/calibre-plugin/jobs.py new/FanFicFare-4.47.0/calibre-plugin/jobs.py
--- old/FanFicFare-4.46.0/calibre-plugin/jobs.py        2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/calibre-plugin/jobs.py        2025-07-03 15:21:33.000000000 +0200
@@ -16,8 +16,6 @@
 from collections import defaultdict
 import sys
 
-from calibre.utils.ipc.server import Empty, Server
-from calibre.utils.ipc.job import ParallelJob
 from calibre.utils.date import local_tz
 
 # pulls in translation files for _() strings
@@ -32,125 +30,6 @@
 #
 # ------------------------------------------------------------------------------
 
-def do_download_worker_multiproc(site,
-                                 book_list,
-                                 options,
-                                 cpus,
-                                 merge,
-                                 notification=lambda x,y:x):
-    '''
-    Coordinator job, to launch child jobs to do downloads.
-    This is run as a worker job in the background to keep the UI more
-    responsive and get around any memory leak issues as it will launch
-    a child job for each book as a worker process
-    '''
-    ## Now running one BG proc per site, which downloads for the same
-    ## site in serial.
-    logger.info("CPUs:%s"%cpus)
-    server = Server(pool_size=cpus)
-
-    logger.info(options['version'])
-
-    ## same info debug calibre prints out at startup. For when users
-    ## give me job output instead of debug log.
-    from calibre.debug import print_basic_debug_info
-    print_basic_debug_info(sys.stderr)
-
-    sites_lists = defaultdict(list)
-    [ sites_lists[x['site']].append(x) for x in book_list if x['good'] ]
-
-    totals = {}
-    # can't do direct assignment in list comprehension?  I'm sure it
-    # makes sense to some pythonista.
-    # [ totals[x['url']]=0.0 for x in book_list if x['good'] ]
-    [ totals.update({x['url']:0.0}) for x in book_list if x['good']  ]
-    # logger.debug(sites_lists.keys())
-
-    # Queue all the jobs
-    jobs_running = 0
-    for site in sites_lists.keys():
-        site_list = sites_lists[site]
-        logger.info(_("Launch background process for site %s:")%site + "\n" +
-                    "\n".join([ x['url'] for x in site_list ]))
-        # logger.debug([ x['url'] for x in site_list])
-        args = ['calibre_plugins.fanficfare_plugin.jobs',
-                'do_download_site',
-                (site,site_list,options,merge)]
-        job = ParallelJob('arbitrary_n',
-                          "site:(%s)"%site,
-                          done=None,
-                          args=args)
-        job._site_list = site_list
-        job._processed = False
-        server.add_job(job)
-        jobs_running += 1
-
-    # This server is an arbitrary_n job, so there is a notifier available.
-    # Set the % complete to a small number to avoid the 'unavailable' indicator
-    notification(0.01, _('Downloading FanFiction Stories'))
-
-    # dequeue the job results as they arrive, saving the results
-    count = 0
-    while True:
-        job = server.changed_jobs_queue.get()
-        # logger.debug("job get job._processed:%s"%job._processed)
-        # A job can 'change' when it is not finished, for example if it
-        # produces a notification.
-        msg = None
-        try:
-            ## msg = book['url']
-            (percent,msg) = job.notifications.get_nowait()
-            # logger.debug("%s<-%s"%(percent,msg))
-            if percent == 10.0: # Only when signaling d/l done.
-                count += 1
-                totals[msg] = 1.0/len(totals)
-                # logger.info("Finished: %s"%msg)
-            else:
-                totals[msg] = percent/len(totals)
-            notification(max(0.01,sum(totals.values())), _('%(count)d of %(total)d stories finished downloading')%{'count':count,'total':len(totals)})
-        except Empty:
-            pass
-        # without update, is_finished will never be set.  however, we
-        # do want to get all the notifications for status so we don't
-        # miss the 'done' ones.
-        job.update(consume_notifications=False)
-
-        # if not job._processed:
-        #     sleep(0.5)
-        ## Can have a race condition where job.is_finished before
-        ## notifications for all downloads have been processed.
-        ## Or even after the job has been finished.
-        # logger.debug("job.is_finished(%s) or job._processed(%s)"%(job.is_finished, job._processed))
-        if not job.is_finished:
-            continue
-
-        ## only process each job once.  We can get more than one loop
-        ## after job.is_finished.
-        if not job._processed:
-            # sleep(1)
-            # A job really finished. Get the information.
-
-            ## This is where bg proc details end up in GUI log.
-            ## job.details is the whole debug log for each proc.
-            logger.info("\n\n" + ("="*80) + " " + job.details.replace('\r',''))
-            # logger.debug("Finished background process for site %s:\n%s"%(job._site_list[0]['site'],"\n".join([ x['url'] for x in job._site_list ])))
-            for b in job._site_list:
-                book_list.remove(b)
-            book_list.extend(job.result)
-            job._processed = True
-            jobs_running -= 1
-
-        ## Can't use individual count--I've seen stories all reported
-        ## finished before results of all jobs processed.
-        if jobs_running == 0:
-            ret_list = finish_download(book_list)
-            break
-
-    server.close()
-
-    # return the book list as the job result
-    return ret_list
-
 def do_download_worker_single(site,
                               book_list,
                               options,
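
The removed coordinator's per-site bucketing is plain defaultdict grouping; the surviving single-process worker presumably keeps the same split. A standalone sketch with made-up book dicts:

    from collections import defaultdict

    book_list = [
        {'url': 'https://example.com/s/1', 'site': 'example.com', 'good': True},
        {'url': 'https://example.com/s/2', 'site': 'example.com', 'good': False},
        {'url': 'https://other.org/s/3', 'site': 'other.org', 'good': True},
    ]

    sites_lists = defaultdict(list)
    for x in book_list:
        if x['good']:  # books already marked bad are skipped
            sites_lists[x['site']].append(x)
    # downloads for each site then run serially within one background job
    print(dict(sites_lists))
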
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/calibre-plugin/plugin-defaults.ini new/FanFicFare-4.47.0/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-4.46.0/calibre-plugin/plugin-defaults.ini    2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/calibre-plugin/plugin-defaults.ini    2025-07-03 15:21:33.000000000 +0200
@@ -760,7 +760,7 @@
 max_fg_sleep_at_downloads:4
 
 ## exclude emoji and default avatars.
-cover_exclusion_regexp:(/styles/|xenforo/avatars/avatar.*\.png|https://cdn\.jsdelivr\.net/gh/|https://cdn\.jsdelivr\.net/emojione)
+cover_exclusion_regexp:(/styles/|xenforo/avatars/avatar.*\.png|https://cdn\.jsdelivr\.net/gh/|https://cdn\.jsdelivr\.net/emojione|/data/svg/2/1/\d+/2022_favicon_[^.]*\.png)
 
 ## use author(original poster)'s avatar as cover image when true.
 author_avatar_cover:false
@@ -869,7 +869,9 @@
 ## there are at least this many threadmarks.  A number of older
 ## threads have a single threadmark to an 'index' post.  Set to 1 to
 ## use threadmarks whenever they exist.
-minimum_threadmarks:2
+## Update Jun2025: Default changed to 1, index posts are not a common
+## thing anymore.
+minimum_threadmarks:1
 
 ## When 'first post' (or post URL) is being added as a chapter, give
 ## the chapter this title.
@@ -3379,6 +3381,15 @@
 ## than other XF2 sites.
 threadmarks_per_page:50
 
+## Using cloudscraper can satisfy the first couple levels of
+## Cloudflare bot-proofing, but not all levels.  Older versions of
+## OpenSSL will also raise problems, so versions of Calibre older than
+## v5 will probably fail.  Only a few sites are configured with
+## use_cloudscraper:true by default, but it can be applied in other
+## sites' ini sections.  user_agent setting is ignored when
+## use_cloudscraper:true
+use_cloudscraper:true
+
 [www.aneroticstory.com]
 use_basic_cache:true
 ## Some sites do not require a login, but do require the user to
@@ -4190,6 +4201,12 @@
 use_basic_cache:true
 extra_valid_entries:stars
 
+## royalroad is a little unusual--it doesn't require user/pass, but the site
+## keeps track of which chapters you've read.  This way, on download,
+## it thinks you're up to date.
+#username:YourName
+#password:yourpassword
+
 #add_to_extra_titlepage_entries:,stars
 
 ## some sites include images that we don't ever want becoming the
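
The alternative added to cover_exclusion_regexp above targets the 2022-era XenForo favicon PNGs. A quick standalone check (the URLs are illustrative):

    import re

    pat = re.compile(r"/data/svg/2/1/\d+/2022_favicon_[^.]*\.png")
    assert pat.search(
        "https://forums.example.com/data/svg/2/1/1751500000/2022_favicon_32x32.png")
    assert not pat.search("https://forums.example.com/attachments/cover.jpg")
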
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/calibre-plugin/prefs.py new/FanFicFare-4.47.0/calibre-plugin/prefs.py
--- old/FanFicFare-4.46.0/calibre-plugin/prefs.py       2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/calibre-plugin/prefs.py       2025-07-03 15:21:33.000000000 +0200
@@ -197,7 +197,8 @@
 default_prefs['update_existing_only_from_email'] = False
 default_prefs['download_from_email_immediately'] = False
 
-default_prefs['single_proc_jobs'] = True
+
+#default_prefs['single_proc_jobs'] = True # setting and code removed
 default_prefs['site_split_jobs'] = True
 default_prefs['reconsolidate_jobs'] = True
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/adapters/adapter_ashwindersycophanthexcom.py new/FanFicFare-4.47.0/fanficfare/adapters/adapter_ashwindersycophanthexcom.py
--- old/FanFicFare-4.46.0/fanficfare/adapters/adapter_ashwindersycophanthexcom.py       2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/adapters/adapter_ashwindersycophanthexcom.py       2025-07-03 15:21:33.000000000 +0200
@@ -48,7 +48,7 @@
 
 
         # normalized story URL.
-        self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
+        self._setURL('https://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
 
         # Each adapter needs to have a unique site abbreviation.
         self.story.setMetadata('siteabbrev','asph')
@@ -64,10 +64,10 @@
 
     @classmethod
     def getSiteExampleURLs(cls):
-        return "http://"+cls.getSiteDomain()+"/viewstory.php?sid=1234"
+        return "https://"+cls.getSiteDomain()+"/viewstory.php?sid=1234"
 
     def getSiteURLPattern(self):
-        return re.escape("http://"+self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"
+        return r"https?://"+re.escape(self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"
 
     ## Login seems to be reasonably standard across eFiction sites.
     def needToLoginCheck(self, data):
@@ -92,7 +92,7 @@
         params['intent'] = ''
         params['submit'] = 'Submit'
 
-        loginUrl = 'http://' + self.getSiteDomain() + '/user.php'
+        loginUrl = 'https://' + self.getSiteDomain() + '/user.php'
         logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
                                                               params['penname']))
 
@@ -130,7 +130,7 @@
         # Find authorid and URL from... author url.
         a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
         self.story.setMetadata('authorId',a['href'].split('=')[1])
-        self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
+        self.story.setMetadata('authorUrl','https://'+self.host+'/'+a['href'])
         self.story.setMetadata('author',a.string)
        asoup = self.make_soup(self.get_request(self.story.getMetadata('authorUrl')))
 
@@ -138,7 +138,7 @@
             # in case link points somewhere other than the first chapter
             a = soup.find_all('option')[1]['value']
             self.story.setMetadata('storyId',a.split('=',)[1])
-            url = 'http://'+self.host+'/'+a
+            url = 'https://'+self.host+'/'+a
             soup = self.make_soup(self.get_request(url))
         except:
             pass
@@ -157,7 +157,7 @@
         else:
             for chapter in chapters:
                 # just in case there's tags, like <i> in chapter titles.
-                self.add_chapter(chapter,'http://'+self.host+'/'+chapter['href'])
+                self.add_chapter(chapter,'https://'+self.host+'/'+chapter['href'])
 
 
         # eFiction sites don't help us out a lot with their meta data
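
The loosened getSiteURLPattern now accepts either scheme while still escaping the literal part of the URL. A standalone check (the domain is inferred from the adapter name, so treat it as illustrative):

    import re

    domain = "ashwinder.sycophanthex.com"
    pat = re.compile(r"https?://" + re.escape(domain + "/viewstory.php?sid=") + r"\d+$")
    assert pat.match("https://" + domain + "/viewstory.php?sid=1234")
    assert pat.match("http://" + domain + "/viewstory.php?sid=1234")
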
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/adapters/adapter_fimfictionnet.py new/FanFicFare-4.47.0/fanficfare/adapters/adapter_fimfictionnet.py
--- old/FanFicFare-4.46.0/fanficfare/adapters/adapter_fimfictionnet.py  2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/adapters/adapter_fimfictionnet.py  2025-07-03 15:21:33.000000000 +0200
@@ -101,11 +101,13 @@
 
     def make_soup(self,data):
         soup = super(FimFictionNetSiteAdapter, self).make_soup(data)
-        for img in soup.find_all('img',{'class':'user_image'}):
+        for img in soup.select('img.lazy-img, img.user_image'):
             ## FimF has started a 'camo' mechanism for images that
             ## gets block by CF.  attr data-source is original source.
             if img.has_attr('data-source'):
                 img['src'] = img['data-source']
+            elif img.has_attr('data-src'):
+                img['src'] = img['data-src']
         return soup
 
     def doExtractChapterUrlsAndMetadata(self,get_cover=True):
@@ -433,4 +435,4 @@
             logger.debug("Next button: " + next_button.get_text())
             if next_button.get_text() or not iterate:
                 return {'urllist': final_urls}
-            url = ('https://' + self.getSiteDomain() + next_button.get('href'))
\ No newline at end of file
+            url = ('https://' + self.getSiteDomain() + next_button.get('href'))
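
The widened selector and the new data-src fallback behave like this (a minimal BeautifulSoup sketch; the markup is illustrative):

    from bs4 import BeautifulSoup

    html = '<img class="lazy-img" data-src="https://example.com/real.jpg" src="placeholder.gif">'
    soup = BeautifulSoup(html, "html.parser")
    for img in soup.select('img.lazy-img, img.user_image'):
        if img.has_attr('data-source'):    # camo'd images keep the original URL here
            img['src'] = img['data-source']
        elif img.has_attr('data-src'):     # lazy-loaded images
            img['src'] = img['data-src']
    print(soup.img['src'])                 # https://example.com/real.jpg
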
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/adapters/adapter_royalroadcom.py new/FanFicFare-4.47.0/fanficfare/adapters/adapter_royalroadcom.py
--- old/FanFicFare-4.46.0/fanficfare/adapters/adapter_royalroadcom.py   2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/adapters/adapter_royalroadcom.py   2025-07-03 15:21:33.000000000 +0200
@@ -104,6 +104,43 @@
     def getSiteURLPattern(self):
        return "https?"+re.escape("://")+r"(www\.|)royalroadl?\.com/fiction/\d+(/.*)?$"
 
+
+    # rr won't send you future updates if you aren't 'caught up'
+    # on the story.  Login isn't required but logging in will
+    # mark stories you've downloaded as 'read' on rr.
+    def performLogin(self):
+        params = {}
+
+        if self.password:
+            params['Email'] = self.username
+            params['password'] = self.password
+        else:
+            params['Email'] = self.getConfig("username")
+            params['password'] = self.getConfig("password")
+
+        if not params['password']:
+            return
+
+        loginUrl = 'https://' + self.getSiteDomain() + '/account/login'
+        logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
+                                                              params['Email']))
+
+        ## need to pull empty login page first to get request token
+        soup = self.make_soup(self.get_request(loginUrl))
+        ## FYI, this will fail if cookiejar is shared, but
+        ## use_basic_cache is false.
+        params['__RequestVerificationToken']=soup.find('input', {'name':'__RequestVerificationToken'})['value']
+
+        d = self.post_request(loginUrl, params)
+
+        if "Sign in" in d : #Member Account
+            logger.info("Failed to login to URL %s as %s" % (loginUrl,
+                                                             params['Email']))
+            raise exceptions.FailedToLogin(self.url,params['urealname'])
+            return False
+        else:
+            return True
+
     ## RR chapter URL only requires the chapter ID number field to be correct, story ID and title values are ignored
     ## URL format after the domain /fiction/ is long form, storyID/storyTitle/chapter/chapterID/chapterTitle
     ##  short form has /fiction/chapter/chapterID    both forms have optional final /
@@ -160,6 +197,9 @@
         url = self.url
         logger.debug("URL: "+url)
 
+        # Log in so site will mark the chapers as read
+        self.performLogin()
+
         data = self.get_request(url)
 
         soup = self.make_soup(data)
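
Outside FanFicFare, the same token-then-post login flow looks roughly like this (a sketch using requests and BeautifulSoup; the form field names come from the diff above, the credentials are placeholders):

    import requests
    from bs4 import BeautifulSoup

    session = requests.Session()
    login_url = "https://www.royalroad.com/account/login"

    # pull the empty login page first to get the request token
    soup = BeautifulSoup(session.get(login_url).text, "html.parser")
    token = soup.find("input", {"name": "__RequestVerificationToken"})["value"]

    resp = session.post(login_url, data={
        "Email": "YourName",            # placeholder credentials
        "password": "yourpassword",
        "__RequestVerificationToken": token,
    })
    if "Sign in" in resp.text:          # login form still present => failed
        raise RuntimeError("login failed")
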
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/adapters/base_adapter.py new/FanFicFare-4.47.0/fanficfare/adapters/base_adapter.py
--- old/FanFicFare-4.46.0/fanficfare/adapters/base_adapter.py   2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/adapters/base_adapter.py   2025-07-03 15:21:33.000000000 +0200
@@ -912,9 +912,9 @@
     def normalize_chapterurl(self,url):
         return url
 
-def cachedfetch(realfetch,cache,url,referer=None):
+def cachedfetch(realfetch,cache,url,referer=None,image=None):
     if url in cache:
         return cache[url]
     else:
-        return realfetch(url,referer=referer)
+        return realfetch(url,referer=referer,image=image)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/adapters/base_otw_adapter.py new/FanFicFare-4.47.0/fanficfare/adapters/base_otw_adapter.py
--- old/FanFicFare-4.46.0/fanficfare/adapters/base_otw_adapter.py       2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/adapters/base_otw_adapter.py       2025-07-03 15:21:33.000000000 +0200
@@ -103,7 +103,7 @@
         if self.getConfig("use_archive_transformativeworks_org"):
             logger.warning("Not doing OTW(AO3) login -- doesn't work with use_archive_transformativeworks_org")
             return False
-        if self.getConfig("open_pages_in_browser"):
+        if self.getConfig("open_pages_in_browser") and self.getConfig("use_browser_cache") and self.getConfig("use_browser_cache_only"):
             logger.warning("Not doing OTW(AO3) login -- doesn't work with open_pages_in_browser")
             return False
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/adapters/base_xenforo2forum_adapter.py new/FanFicFare-4.47.0/fanficfare/adapters/base_xenforo2forum_adapter.py
--- old/FanFicFare-4.46.0/fanficfare/adapters/base_xenforo2forum_adapter.py     2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/adapters/base_xenforo2forum_adapter.py     2025-07-03 15:21:33.000000000 +0200
@@ -69,7 +69,7 @@
     @classmethod
     def getConfigSections(cls):
         "Only needs to be overriden if has additional ini sections."
-        ## No sites use base_xenforoforum anymore, but 
+        ## No sites use base_xenforoforum anymore, but
         return ['base_xenforoforum','base_xenforo2forum',cls.getConfigSection()]
 
     @classmethod
@@ -318,14 +318,13 @@
 
     def parse_title(self,souptag):
         h1 = souptag.find('h1',{'class':'p-title-value'})
-        # logger.debug(h1)
-        ## April24 Prefix tags moved back out of title at some
-        ## point. This should probably be somewhere else
-        for tag in souptag.select("div.p-body-header a[href*='prefix_id']"):
-            ## prefixtags included in genre in defaults.ini
+        ## Jun25
+        ## the-sietch still has 'Crossover', 'Sci-Fi' etc spans in the title h1.
+        ## Also populated down near other tags for SV/SB/etc
+        for tag in h1.find_all('span',{'class':'label'}):
             self.story.addToList('prefixtags',stripHTML(tag))
-            logger.debug("Prefix tag(%s)"%stripHTML(tag))
-            # tag.extract()
+            # logger.debug(stripHTML(tag))
+            tag.extract()
         self.story.setMetadata('title',stripHTML(h1))
         # logger.debug(stripHTML(h1))
 
@@ -549,7 +548,7 @@
         if self.getConfig('order_threadmarks_by_date') and not self.getConfig('order_threadmarks_by_date_categories'):
             threadmarks = sorted(threadmarks, key=lambda x: x['date'])
         return threadmarks
-    
+
     def get_threadmarks_list(self,soupmarks):
         retval = soupmarks.find('div',{'class':'structItemContainer'})
         if retval:
@@ -827,6 +826,11 @@
         if use_threadmark_chaps:
             self.set_threadmarks_metadata(useurl,topsoup)
 
+        for tag in souptag.select("div.p-body-header span.label"): # a[href*='prefix_id']"):
+            ## prefixtags included in genre in defaults.ini
+            self.story.addToList('prefixtags',stripHTML(tag))
+            # logger.debug("Prefix tag(%s)"%stripHTML(tag))
+
         if use_threadmark_chaps or self.getConfig('always_use_forumtags'):
             ## only use tags if threadmarks for chapters or always_use_forumtags is on.
             tagmap = {
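
The new parse_title behaviour, reduced to a standalone BeautifulSoup sketch (the markup is illustrative):

    from bs4 import BeautifulSoup

    html = '<h1 class="p-title-value"><span class="label">Sci-Fi</span> My Story</h1>'
    h1 = BeautifulSoup(html, "html.parser").find('h1', {'class': 'p-title-value'})
    prefixtags = []
    for tag in h1.find_all('span', {'class': 'label'}):
        prefixtags.append(tag.get_text(strip=True))
        tag.extract()   # drop the label so it stays out of the title
    print(prefixtags, h1.get_text(strip=True))   # ['Sci-Fi'] My Story
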
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/cli.py new/FanFicFare-4.47.0/fanficfare/cli.py
--- old/FanFicFare-4.46.0/fanficfare/cli.py     2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/cli.py     2025-07-03 15:21:33.000000000 +0200
@@ -28,7 +28,7 @@
 import os, sys, platform
 
 
-version="4.46.0"
+version="4.47.0"
 os.environ['CURRENT_VERSION_ID']=version
 
 global_cache = 'global_cache'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/configurable.py new/FanFicFare-4.47.0/fanficfare/configurable.py
--- old/FanFicFare-4.46.0/fanficfare/configurable.py    2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/configurable.py    2025-07-03 15:21:33.000000000 +0200
@@ -218,6 +218,7 @@
                'use_basic_cache':(None,None,boollist),
                'use_nsapa_proxy':(None,None,boollist),
                'use_flaresolverr_proxy':(None,None,boollist+['withimages','directimages']),
+               'use_flaresolverr_session':(None,None,boollist),
 
                ## currently, browser_cache_path is assumed to be
                ## shared and only ffnet uses it so far
@@ -552,6 +553,8 @@
                  'flaresolverr_proxy_port',
                  'flaresolverr_proxy_protocol',
                  'flaresolverr_proxy_timeout',
+                 'use_flaresolverr_session',
+                 'flaresolverr_session',
                  'browser_cache_path',
                  'browser_cache_age_limit',
                  'user_agent',
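
In ini terms, the two new keys would sit alongside the existing flaresolverr settings, something like this (an illustrative snippet; the boolean form matches the boollist registration above, but the session name is a placeholder, not a documented default):

    [defaults]
    use_flaresolverr_session:true
    flaresolverr_session:fff_session
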
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/defaults.ini new/FanFicFare-4.47.0/fanficfare/defaults.ini
--- old/FanFicFare-4.46.0/fanficfare/defaults.ini       2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/defaults.ini       2025-07-03 15:21:33.000000000 +0200
@@ -750,7 +750,7 @@
 slow_down_sleep_time:6
 
 ## exclude emoji and default avatars.
-cover_exclusion_regexp:(/styles/|xenforo/avatars/avatar.*\.png|https://cdn\.jsdelivr\.net/gh/|https://cdn\.jsdelivr\.net/emojione)
+cover_exclusion_regexp:(/styles/|xenforo/avatars/avatar.*\.png|https://cdn\.jsdelivr\.net/gh/|https://cdn\.jsdelivr\.net/emojione|/data/svg/2/1/\d+/2022_favicon_[^.]*\.png)
 
 ## use author(original poster)'s avatar as cover image when true.
 author_avatar_cover:false
@@ -859,7 +859,9 @@
 ## there are at least this many threadmarks.  A number of older
 ## threads have a single threadmark to an 'index' post.  Set to 1 to
 ## use threadmarks whenever they exist.
-minimum_threadmarks:2
+## Update Jun2025: Default changed to 1, index posts are not a common
+## thing anymore.
+minimum_threadmarks:1
 
 ## When 'first post' (or post URL) is being added as a chapter, give
 ## the chapter this title.
@@ -3372,6 +3374,15 @@
 ## than other XF2 sites.
 threadmarks_per_page:50
 
+## Using cloudscraper can satisfy the first couple levels of
+## Cloudflare bot-proofing, but not all levels.  Older versions of
+## OpenSSL will also raise problems, so versions of Calibre older than
+## v5 will probably fail.  Only a few sites are configured with
+## use_cloudscraper:true by default, but it can be applied in other
+## sites' ini sections.  user_agent setting is ignored when
+## use_cloudscraper:true
+use_cloudscraper:true
+
 [www.aneroticstory.com]
 use_basic_cache:true
 ## Some sites do not require a login, but do require the user to
@@ -4163,6 +4174,12 @@
 use_basic_cache:true
 extra_valid_entries:stars
 
+## royalroad is a little unusual--it doesn't require user/pass, but the site
+## keeps track of which chapters you've read.  This way, on download,
+## it thinks you're up to date.
+#username:YourName
+#password:yourpassword
+
 #add_to_extra_titlepage_entries:,stars
 
 ## some sites include images that we don't ever want becoming the
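
Per the comment block above, use_cloudscraper can also be switched on for an individual site section in personal.ini (the section name here is illustrative):

    [forums.example-xf2-site.com]
    use_cloudscraper:true
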
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/fetchers/base_fetcher.py new/FanFicFare-4.47.0/fanficfare/fetchers/base_fetcher.py
--- old/FanFicFare-4.46.0/fanficfare/fetchers/base_fetcher.py   2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/fetchers/base_fetcher.py   2025-07-03 15:21:33.000000000 +0200
@@ -80,11 +80,13 @@
     def set_cookiejar(self,cookiejar):
         self.cookiejar = cookiejar
 
-    def make_headers(self,url,referer=None):
+    def make_headers(self,url,referer=None,image=False):
         headers = {}
         headers['User-Agent']=self.getConfig('user_agent')
         if referer:
             headers['Referer']=referer
+        if image is True:
+            headers["Accept"] = "image/*"
         # if "xf2test" in url:
         #     import base64
        #     base64string = base64.encodestring(b"sbreview2019:Fs2PwuVE9").replace(b'\n', b'')
@@ -99,10 +101,11 @@
     def do_request(self, method, url,
                     parameters=None,
                     referer=None,
-                    usecache=True):
+                    usecache=True,
+                    image=False):
         # logger.debug("fetcher do_request")
         # logger.debug(self.get_cookiejar())
-        headers = self.make_headers(url,referer=referer)
+        headers = self.make_headers(url,referer=referer,image=image)
         fetchresp = self.request(method,url,
                                  headers=headers,
                                  parameters=parameters)
@@ -129,10 +132,11 @@
 
     def get_request_redirected(self, url,
                                referer=None,
-                               usecache=True):
+                               usecache=True,
+                               image=False):
         fetchresp = self.do_request('GET',
                                      self.condition_url(url),
                                      referer=referer,
-                                     usecache=usecache)
+                                     usecache=usecache,
+                                     image=image)
         return (fetchresp.content,fetchresp.redirecturl)
-
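
The image= flag threaded through the fetcher chain only changes the request headers. A standalone sketch of the same behaviour with plain requests (the function and its defaults are illustrative):

    import requests

    def fetch(url, referer=None, image=False, user_agent="FanFicFare-example"):
        headers = {'User-Agent': user_agent}
        if referer:
            headers['Referer'] = referer
        if image is True:
            # some image hosts refuse requests without an image Accept header
            headers['Accept'] = 'image/*'
        return requests.get(url, headers=headers)
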
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/fetchers/cache_basic.py new/FanFicFare-4.47.0/fanficfare/fetchers/cache_basic.py
--- old/FanFicFare-4.46.0/fanficfare/fetchers/cache_basic.py    2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/fetchers/cache_basic.py    2025-07-03 15:21:33.000000000 +0200
@@ -103,7 +103,8 @@
                            url,
                            parameters=None,
                            referer=None,
-                           usecache=True):
+                           usecache=True,
+                           image=False):
         '''
         When should cache be cleared or not used? logins, primarily
         Note that usecache=False prevents lookup, but cache still saves
@@ -124,7 +125,8 @@
             url,
             parameters=parameters,
             referer=referer,
-            usecache=usecache)
+            usecache=usecache,
+            image=image)
 
         data = fetchresp.content
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/fetchers/cache_browser.py new/FanFicFare-4.47.0/fanficfare/fetchers/cache_browser.py
--- old/FanFicFare-4.46.0/fanficfare/fetchers/cache_browser.py  2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/fetchers/cache_browser.py  2025-07-03 15:21:33.000000000 +0200
@@ -54,7 +54,8 @@
                            url,
                            parameters=None,
                            referer=None,
-                           usecache=True):
+                           usecache=True,
+                           image=False):
         with self.cache_lock:
             # logger.debug("BrowserCacheDecorator fetcher_do_request")
             fromcache=True
@@ -121,5 +122,5 @@
                 url,
                 parameters=parameters,
                 referer=referer,
-                usecache=usecache)
-
+                usecache=usecache,
+                image=image)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/fetchers/decorators.py new/FanFicFare-4.47.0/fanficfare/fetchers/decorators.py
--- old/FanFicFare-4.46.0/fanficfare/fetchers/decorators.py     2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/fetchers/decorators.py     2025-07-03 15:21:33.000000000 +0200
@@ -44,14 +44,16 @@
                            url,
                            parameters=None,
                            referer=None,
-                           usecache=True):
+                           usecache=True,
+                           image=False):
         ## can use fetcher.getConfig()/getConfigList().
         fetchresp = chainfn(
             method,
             url,
             parameters=parameters,
             referer=referer,
-            usecache=usecache)
+            usecache=usecache,
+            image=image)
 
         return fetchresp
 
@@ -63,14 +65,16 @@
                            url,
                            parameters=None,
                            referer=None,
-                           usecache=True):
+                           usecache=True,
+                           image=False):
         # logger.debug("ProgressBarDecorator fetcher_do_request")
         fetchresp = chainfn(
             method,
             url,
             parameters=parameters,
             referer=referer,
-            usecache=usecache)
+            usecache=usecache,
+            image=image)
         ## added ages ago for CLI to give a line of dots showing it's
         ## doing something.
         sys.stdout.write('.')
@@ -97,14 +101,16 @@
                            url,
                            parameters=None,
                            referer=None,
-                           usecache=True):
+                           usecache=True,
+                           image=False):
         # logger.debug("SleepDecorator fetcher_do_request")
         fetchresp = chainfn(
             method,
             url,
             parameters=parameters,
             referer=referer,
-            usecache=usecache)
+            usecache=usecache,
+            image=image)
 
         # don't sleep cached results.  Usually MemCache results will
         # be before sleep, but check fetchresp.fromcache for file://
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/fetchers/fetcher_cloudscraper.py new/FanFicFare-4.47.0/fanficfare/fetchers/fetcher_cloudscraper.py
--- old/FanFicFare-4.46.0/fanficfare/fetchers/fetcher_cloudscraper.py   2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/fetchers/fetcher_cloudscraper.py   2025-07-03 15:21:33.000000000 +0200
@@ -54,9 +54,10 @@
                 source_address=session.source_address,
                 max_retries=self.retries))
 
-    def make_headers(self,url,referer=None):
+    def make_headers(self,url,referer=None,image=False):
         headers = super(CloudScraperFetcher,self).make_headers(url,
-                                                               referer=referer)
+                                                               referer=referer,
+                                                               image=image)
         ## let cloudscraper do its thing with UA.
         if 'User-Agent' in headers:
             del headers['User-Agent']
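Note: make_headers() here only forwards the new image flag; the base-class implementation is outside this excerpt. Going by the changelog entry "Include Accept:image/* header when requesting an image url", it presumably does something like this hedged sketch:

    # Hedged sketch; the real base-class make_headers is not in this diff.
    def make_headers(url, referer=None, image=False):
        headers = {'User-Agent': 'FanFicFare'}
        if referer:
            headers['Referer'] = referer
        if image:
            # per the changelog: send Accept:image/* for image URLs
            headers['Accept'] = 'image/*'
        return headers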
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/fetchers/fetcher_flaresolverr_proxy.py new/FanFicFare-4.47.0/fanficfare/fetchers/fetcher_flaresolverr_proxy.py
--- old/FanFicFare-4.46.0/fanficfare/fetchers/fetcher_flaresolverr_proxy.py     2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/fetchers/fetcher_flaresolverr_proxy.py     2025-07-03 15:21:33.000000000 +0200
@@ -35,7 +35,6 @@
 
 FLARESOLVERR_SESSION="FanFicFareSession"
 ## not convinced this is a good idea yet.
-USE_FS_SESSION=False
 
 class FlareSolverr_ProxyFetcher(RequestsFetcher):
     def __init__(self, getConfig_fn, getConfigList_fn):
@@ -53,7 +52,7 @@
         return retry
 
     def do_fs_request(self, cmd, url=None, headers=None, parameters=None):
-        if USE_FS_SESSION and not self.fs_session:
+        if self.getConfig("use_flaresolverr_session",False) and not 
self.fs_session:
             # manually setting the session causes FS to use that
             # string as the session id.
             resp = self.super_request('POST',
@@ -62,7 +61,7 @@
                                           
':'+self.getConfig("flaresolverr_proxy_port", '8191')+'/v1',
                                       
headers={'Content-Type':'application/json'},
                                       json={'cmd':'sessions.create',
-                                            'session':FLARESOLVERR_SESSION}
+                                            
'session':self.getConfig("flaresolverr_session",FLARESOLVERR_SESSION)}
                                       )
             # XXX check resp for error?  What errors could occur?
             # logger.debug(json.dumps(resp.json, sort_keys=True,
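Note: both new settings are read via getConfig(), so they should be settable in the usual ini configuration. A hedged, illustrative snippet (values are examples; the session name defaults to the FLARESOLVERR_SESSION constant above):

    ## Illustrative only: the new 4.47.0 FlareSolverr settings from #1211.
    [defaults]
    use_flaresolverr_session:true
    flaresolverr_session:FanFicFareSession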
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/geturls.py new/FanFicFare-4.47.0/fanficfare/geturls.py
--- old/FanFicFare-4.46.0/fanficfare/geturls.py 2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/geturls.py 2025-07-03 15:21:33.000000000 +0200
@@ -193,90 +193,93 @@
 
     # logger.debug("get_urls_from_imap srv:(%s)"%srv)
     mail = imaplib.IMAP4_SSL(srv)
-    status = mail.login(user, passwd)
-    if status[0] != 'OK':
-        raise FetchEmailFailed("Failed to login to mail server")
-    # Out: list of "folders" aka labels in gmail.
-    status = mail.list()
-    # logger.debug(status)
-
-    folders = []
     try:
-        for f in status[1]:
-            m = re.match(r'^\(.*\) "?."? "?(?P<folder>.+?)"?$',ensure_str(f))
-            if m:
-                folders.append(m.group("folder").replace("\\",""))
-                # logger.debug(folders[-1])
-            else:
-                logger.warning("Failed to parse IMAP folder 
line(%s)"%ensure_str(f))
-    except:
-        folders = []
-        logger.warning("Failed to parse IMAP folder list, continuing without 
list.")
-
-    if status[0] != 'OK':
-        raise FetchEmailFailed("Failed to list folders on mail server")
-
-    # Needs to be quoted incase there are spaces, etc.  imaplib
-    # doesn't correctly quote folders with spaces.  However, it does
-    # check and won't quote strings that already start and end with ",
-    # so this is safe.  There may be other chars than " that need escaping.
-    status = mail.select('"%s"'%folder.replace('"','\\"'))
-    if status[0] != 'OK':
+        status = mail.login(user, passwd)
+        if status[0] != 'OK':
+            raise FetchEmailFailed("Failed to login to mail server")
+        # Out: list of "folders" aka labels in gmail.
+        status = mail.list()
         # logger.debug(status)
-        if folders:
-            raise FetchEmailFailed("Failed to select folder(%s) on mail server 
(folder list:%s)"%(folder,folders))
-        else:
-            raise FetchEmailFailed("Failed to select folder(%s) on mail 
server"%folder)
 
-    result, data = mail.uid('search', None, "UNSEEN")
+        folders = []
+        try:
+            for f in status[1]:
+                m = re.match(r'^\(.*\) "?."? "?(?P<folder>.+?)"?$',ensure_str(f))
+                if m:
+                    folders.append(m.group("folder").replace("\\",""))
+                    # logger.debug(folders[-1])
+                else:
+                    logger.warning("Failed to parse IMAP folder line(%s)"%ensure_str(f))
+        except:
+            folders = []
+            logger.warning("Failed to parse IMAP folder list, continuing without list.")
+
+        if status[0] != 'OK':
+            raise FetchEmailFailed("Failed to list folders on mail server")
+
+        # Needs to be quoted incase there are spaces, etc.  imaplib
+        # doesn't correctly quote folders with spaces.  However, it does
+        # check and won't quote strings that already start and end with ",
+        # so this is safe.  There may be other chars than " that need escaping.
+        status = mail.select('"%s"'%folder.replace('"','\\"'))
+        if status[0] != 'OK':
+            # logger.debug(status)
+            if folders:
+                raise FetchEmailFailed("Failed to select folder(%s) on mail server (folder list:%s)"%(folder,folders))
+            else:
+                raise FetchEmailFailed("Failed to select folder(%s) on mail server"%folder)
 
-    #logger.debug("result:%s"%result)
-    #logger.debug("data:%s"%data)
-    urls=set()
+        result, data = mail.uid('search', None, "UNSEEN")
 
-    #latest_email_uid = data[0].split()[-1]
-    for email_uid in data[0].split():
+        #logger.debug("result:%s"%result)
+        #logger.debug("data:%s"%data)
+        urls=set()
 
-        result, data = mail.uid('fetch', email_uid, '(BODY.PEEK[])') #RFC822
+        #latest_email_uid = data[0].split()[-1]
+        for email_uid in data[0].split():
 
-        # logger.debug("result:%s"%result)
-        # logger.debug("data:%s"%data)
+            result, data = mail.uid('fetch', email_uid, '(BODY.PEEK[])') #RFC822
 
-        raw_email = data[0][1]
+            # logger.debug("result:%s"%result)
+            # logger.debug("data:%s"%data)
 
-    #raw_email = data[0][1] # here's the body, which is raw text of the whole email
-    # including headers and alternate payloads
+            raw_email = data[0][1]
 
-        try:
-            email_message = email.message_from_string(ensure_str(raw_email))
-        except Exception as e:
-            logger.error("Failed decode email message: %s"%e,exc_info=True)
-            continue
-
-        # logger.debug("To:%s"%email_message['To'])
-        # logger.debug("From:%s"%email_message['From'])
-        # logger.debug("Subject:%s"%email_message['Subject'])
-        # logger.debug("payload:%r"%email_message.get_payload(decode=True))
+        #raw_email = data[0][1] # here's the body, which is raw text of the whole email
+        # including headers and alternate payloads
 
-        urllist=[]
-        for part in email_message.walk():
             try:
-                # logger.debug("part mime:%s"%part.get_content_type())
-                if part.get_content_type() == 'text/plain':
-                    urllist.extend(get_urls_from_text(part.get_payload(decode=True),foremail=True, normalize=normalize_urls))
-                if part.get_content_type() == 'text/html':
-                    urllist.extend(get_urls_from_html(part.get_payload(decode=True),foremail=True, normalize=normalize_urls))
+                email_message = email.message_from_string(ensure_str(raw_email))
             except Exception as e:
-                logger.error("Failed to read email content: %s"%e,exc_info=True)
-
-        if urllist and markread:
-            #obj.store(data[0].replace(' ',','),'+FLAGS','\Seen')
-            r,d = mail.uid('store',email_uid,'+FLAGS','(\\SEEN)')
-            #logger.debug("seen result:%s->%s"%(email_uid,r))
-
-        [ urls.add(x) for x in urllist ]
+                logger.error("Failed decode email message: %s"%e,exc_info=True)
+                continue
 
-    return urls
+            # logger.debug("To:%s"%email_message['To'])
+            # logger.debug("From:%s"%email_message['From'])
+            # logger.debug("Subject:%s"%email_message['Subject'])
+            # logger.debug("payload:%r"%email_message.get_payload(decode=True))
+
+            urllist=[]
+            for part in email_message.walk():
+                try:
+                    # logger.debug("part mime:%s"%part.get_content_type())
+                    if part.get_content_type() == 'text/plain':
+                        urllist.extend(get_urls_from_text(part.get_payload(decode=True),foremail=True, normalize=normalize_urls))
+                    if part.get_content_type() == 'text/html':
+                        urllist.extend(get_urls_from_html(part.get_payload(decode=True),foremail=True, normalize=normalize_urls))
+                except Exception as e:
+                    logger.error("Failed to read email content: %s"%e,exc_info=True)
+
+            if urllist and markread:
+                #obj.store(data[0].replace(' ',','),'+FLAGS','\Seen')
+                r,d = mail.uid('store',email_uid,'+FLAGS','(\\SEEN)')
+                #logger.debug("seen result:%s->%s"%(email_uid,r))
+
+            [ urls.add(x) for x in urllist ]
+
+        return urls
+    finally:
+        mail.shutdown()
 
 # used by drag-n-drop of email from thunderbird onto Calibre.
 def get_urls_from_mime(mime_data):
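Note: the large reindentation above amounts to one structural change: the whole IMAP conversation now runs inside try/finally, so mail.shutdown() closes the connection whether URL extraction succeeds or raises (the "Shutdown IMAP connection when done with it" changelog item). Reduced to a sketch, with folder-list parsing and URL extraction elided:

    # Sketch of the try/finally shape introduced above.
    import imaplib

    def fetch_unseen_uids(srv, user, passwd, folder):
        mail = imaplib.IMAP4_SSL(srv)
        try:
            mail.login(user, passwd)
            mail.select('"%s"' % folder.replace('"', '\\"'))
            result, data = mail.uid('search', None, "UNSEEN")
            return data[0].split()
        finally:
            # runs on success or failure, so the socket is not leaked
            mail.shutdown()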
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/requestable.py new/FanFicFare-4.47.0/fanficfare/requestable.py
--- old/FanFicFare-4.46.0/fanficfare/requestable.py     2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/requestable.py     2025-07-03 15:21:33.000000000 +0200
@@ -124,9 +124,10 @@
 
     def get_request_raw(self, url,
                         referer=None,
-                        usecache=True): ## referer is used with raw for images.
+                        usecache=True,
+                        image=False): ## referer is used with raw for images.
         return self.configuration.get_fetcher().get_request_redirected(
             self.mod_url_request(url),
             referer=referer,
-            usecache=usecache)[0]
-
+            usecache=usecache,
+            image=image)[0]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/fanficfare/story.py new/FanFicFare-4.47.0/fanficfare/story.py
--- old/FanFicFare-4.46.0/fanficfare/story.py   2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/fanficfare/story.py   2025-07-03 15:21:33.000000000 +0200
@@ -766,11 +766,13 @@
                                                self.getConfigList)
             def get_request_raw(url,
                                 referer=None,
-                                usecache=True): ## referer is used with raw for images.
+                                usecache=True,
+                                image=False): ## referer is used with raw for images.
                 return fetcher.get_request_redirected(
                     url,
                     referer=referer,
-                    usecache=usecache)[0]
+                    usecache=usecache,
+                    image=image)[0]
             self.direct_fetcher = get_request_raw
 
     def prepare_replacements(self):
@@ -1647,7 +1649,7 @@
                                   url) ):
                         refererurl = url
                         logger.debug("Use Referer:%s"%refererurl)
-                    imgdata = fetch(imgurl,referer=refererurl)
+                    imgdata = fetch(imgurl,referer=refererurl,image=True)
 
                 if self.no_image_processing(imgurl):
                     (data,ext,mime) = no_convert_image(imgurl,
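Note: from the call site, the whole image-header feature collapses to this one-line change; image fetches now pass image=True and every layer shown earlier forwards it down to the header-building code. Assuming the sketches above, the end-to-end effect is:

    # Illustrative only, using the hunk's own names (fetch, imgurl, refererurl).
    imgdata = fetch(imgurl, referer=refererurl, image=True)
    # ...which at the bottom of the chain adds 'Accept: image/*' to the request.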
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' --exclude Makefile.in --exclude configure --exclude config.guess --exclude '*.pot' --exclude mkinstalldirs --exclude aclocal.m4 --exclude config.sub --exclude depcomp --exclude install-sh --exclude ltmain.sh old/FanFicFare-4.46.0/pyproject.toml new/FanFicFare-4.47.0/pyproject.toml
--- old/FanFicFare-4.46.0/pyproject.toml        2025-06-07 03:02:47.000000000 +0200
+++ new/FanFicFare-4.47.0/pyproject.toml        2025-07-03 15:21:33.000000000 +0200
@@ -16,7 +16,7 @@
 #
 # For a discussion on single-sourcing the version, see
 # https://packaging.python.org/guides/single-sourcing-package-version/
-version = "4.46.0"
+version = "4.47.0"
 
 # This is a one-line description or tagline of what your project does. This
 # corresponds to the "Summary" metadata field:

++++++ _scmsync.obsinfo ++++++
--- /var/tmp/diff_new_pack.q8SQDy/_old  2025-07-06 17:17:46.731453853 +0200
+++ /var/tmp/diff_new_pack.q8SQDy/_new  2025-07-06 17:17:46.735454019 +0200
@@ -1,5 +1,5 @@
-mtime: 1749497710
-commit: ed345524b4a4f66f133c69d6dd093e956b8c65bbf3cfee4f7835b27ad667d03b
+mtime: 1751575574
+commit: ae11cd735ca52ea3734aee5edcbdb114ca7678e1f3a777d2fba6fbbd441ad20f
 url: https://src.opensuse.org/mcepl/python-fanficfare.git
 revision: factory
 

++++++ build.specials.obscpio ++++++
