Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-fanficfare for 
openSUSE:Factory checked in at 2025-12-02 13:20:10
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
 and      /work/SRC/openSUSE:Factory/.python-fanficfare.new.14147 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-fanficfare"

Tue Dec  2 13:20:10 2025 rev:74 rq:1320774 version:4.52.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes      
2025-11-09 21:09:55.088204866 +0100
+++ 
/work/SRC/openSUSE:Factory/.python-fanficfare.new.14147/python-fanficfare.changes
   2025-12-02 13:21:33.190370556 +0100
@@ -1,0 +2,23 @@
+Mon Dec  1 17:31:25 UTC 2025 - Matej Cepl <[email protected]>
+
+- Update to the version 4.52.0:
+  * adapter_fanfictionsfr: Site SSL requires www now
+  * Remove Site: sinful-dreams.com/whispered/muse - broken for 6+ years
+    even though the other two sites on the same DN work
+  * Remove site: www.wuxiaworld.xyz - DN parked somewhere questionable
+    for 2+ years
+  * Update translations.
+  * Add metadata entry marked_new_chapters for epub updated '(new)'
+    chapters count
+  * adapter_literotica: Get chapters from JSON fetch for #1283
+    (no current examples)
+  * Update QQ reader_posts_per_page default - Thanks, MacaroonRemarkable
+    #1282
+  * Implement Alternate Tagging and Date calculation for Literotica -
+    Thanks, albyofdoom #1280
+  * Add appendices config and improve URL matching for fiction.live
+    adapter - Thanks, MacaroonRemarkable #1279
+  * adapter_royalroadcom: New status Inactive
+  * Fix for add_chapter_numbers:toconly and unnew. Closes #1274
+
+-------------------------------------------------------------------

Old:
----
  FanFicFare-4.51.0.tar.gz

New:
----
  FanFicFare-4.52.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.HCMdWw/_old  2025-12-02 13:21:33.814396745 +0100
+++ /var/tmp/diff_new_pack.HCMdWw/_new  2025-12-02 13:21:33.818396913 +0100
@@ -20,7 +20,7 @@
 %define modnamedown fanficfare
 %define skip_python2 1
 Name:           python-fanficfare
-Version:        4.51.0
+Version:        4.52.0
 Release:        0
 Summary:        Tool for making eBooks from stories on fanfiction and other 
web sites
 License:        GPL-3.0-only

++++++ FanFicFare-4.51.0.tar.gz -> FanFicFare-4.52.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/calibre-plugin/__init__.py 
new/FanFicFare-4.52.0/calibre-plugin/__init__.py
--- old/FanFicFare-4.51.0/calibre-plugin/__init__.py    2025-11-07 
16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/calibre-plugin/__init__.py    2025-12-01 
15:25:22.000000000 +0100
@@ -33,7 +33,7 @@
 from calibre.customize import InterfaceActionBase
 
 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 51, 0)
+__version__ = (4, 52, 0)
 
 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/calibre-plugin/plugin-defaults.ini 
new/FanFicFare-4.52.0/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-4.51.0/calibre-plugin/plugin-defaults.ini    2025-11-07 
16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/calibre-plugin/plugin-defaults.ini    2025-12-01 
15:25:22.000000000 +0100
@@ -513,6 +513,13 @@
 ## (new) marks in TOC when mark_new_chapters:true
 #anthology_merge_keepsingletocs:false
 
+## The count of how many chapters are marked '(new)' will be in
+## metadata entry marked_new_chapters
+marked_new_chapters_label:Chapters Marked New
+
+# Add comma separators for numeric reads. Eg 10000 becomes 10,000
+add_to_comma_entries:,marked_new_chapters
+
 ## chapter title patterns use python template substitution.  The
 ## ${number} is the 'chapter' number and ${title} is the chapter
 ## title, after applying chapter_title_strip_pattern.  ${index04} is
@@ -2068,6 +2075,9 @@
 reader_input_label:Reader Input
 keep_in_order_tags:true
 
+# Choose whether to include Appendix chapters
+include_appendices:true
+
 add_to_keep_html_attrs:,style
 
 add_to_output_css:
@@ -2163,6 +2173,9 @@
 ## threadmarks_per_page than other XF2 sites.
 threadmarks_per_page:400
 
+## forum.questionablequesting.com shows more posts per reader page than other 
XF2 sites.
+reader_posts_per_page:30
+
 [forums.spacebattles.com]
 ## see [base_xenforoforum]
 
@@ -2289,6 +2302,22 @@
 ## or just use the text. If this can't be done, the full title is used.
 clean_chapter_titles: false
 
+## For stories, collect tags from individual chapter pages in addition to the
+## series page tags. This allows collection of tags beyond the top 10 on the 
series but 
+## if the author updates tags on a chapter and not the series, those tags may 
persist even if
+## the chapter is not fetched during an update.
+## Default is false to maintain previous behavior.
+tags_from_chapters: false
+
+## For multi-chapter stories (series), use the chapter approval dates for 
datePublished
+## and dateUpdated instead of the series metadata dates. This provides more 
accurate dates
+## based on actual posting dates rather than just when the series metadata 
changes. This
+## method can provide wildly different dates if chapters were written long 
before being
+## approved, if chapters are approved out of order, or if the works were 
approved/updated
+## before literotica's current series system was implemented.
+## Default is false to maintain previous behavior.
+dates_from_chapters: false
+
 ## Some stories mistakenly include 'Ch' or 'Pt' at the end of the
 ## story title. Appears to be a site bug or common author error.  Copy
 ## these to your personal.ini (and uncomment) to correct.
@@ -4508,11 +4537,3 @@
 
 website_encodings:Windows-1252,utf8
 
-[www.wuxiaworld.xyz]
-use_basic_cache:true
-## Was wuxiaworld.co
-## Note that wuxiaworld.co != wuxiaworld.com
-## When dedup_order_chapter_list:true, use a heuristic algorithm
-## specific to wuxiaworld.xyz order and dedup chapters.
-dedup_order_chapter_list:false
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/calibre-plugin/translations/sv.po 
new/FanFicFare-4.52.0/calibre-plugin/translations/sv.po
--- old/FanFicFare-4.51.0/calibre-plugin/translations/sv.po     2025-11-07 
16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/calibre-plugin/translations/sv.po     2025-12-01 
15:25:22.000000000 +0100
@@ -2,7 +2,7 @@
 # Copyright (C) YEAR ORGANIZATION
 # 
 # Translators:
-# Henrik Mattsson-Mårn <[email protected]>, 2016
+# Henrik Mattsson-Mårn <[email protected]>, 2016,2025
 # J M <[email protected]>, 2016
 # Daniel, 2016-2017,2024
 # efef6ec5b435a041fce803c7f8af77d2_2341d43, 2018-2020
@@ -15,7 +15,7 @@
 "Project-Id-Version: calibre-plugins\n"
 "POT-Creation-Date: 2025-07-03 08:18-0500\n"
 "PO-Revision-Date: 2014-06-19 22:55+0000\n"
-"Last-Translator: Daniel, 2016-2017,2024\n"
+"Last-Translator: Henrik Mattsson-Mårn <[email protected]>, 2016,2025\n"
 "Language-Team: Swedish 
(http://app.transifex.com/calibre/calibre-plugins/language/sv/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
@@ -316,7 +316,7 @@
 
 #: config.py:576
 msgid "Chapter Error"
-msgstr ""
+msgstr "Kapitelfel"
 
 #: config.py:577
 msgid ""
@@ -1739,7 +1739,7 @@
 
 #: dialogs.py:1710
 msgid "Show this confirmation again"
-msgstr ""
+msgstr "Visa denna bekräftelse igen"
 
 #: fff_plugin.py:146 fff_plugin.py:178 fff_plugin.py:491
 msgid "FanFicFare"
@@ -1937,7 +1937,7 @@
 #: fff_plugin.py:659 fff_plugin.py:2078 fff_plugin.py:2745 fff_plugin.py:2757
 #: fff_plugin.py:2768 fff_plugin.py:2774 fff_plugin.py:2787
 msgid "Warning"
-msgstr ""
+msgstr "Varning"
 
 #: fff_plugin.py:667
 msgid "(%d Story URLs Skipped, on Rejected URL List)"
@@ -2084,7 +2084,7 @@
 
 #: fff_plugin.py:1002 fff_plugin.py:1003
 msgid "Cannot Update Anthology"
-msgstr ""
+msgstr "Kan inte uppdatera antologi"
 
 #: fff_plugin.py:1003
 msgid ""
@@ -2140,7 +2140,7 @@
 
 #: fff_plugin.py:1179
 msgid "Queued download for"
-msgstr ""
+msgstr "Köade nedladdning för"
 
 #: fff_plugin.py:1181
 msgid "Started fetching metadata for %s stories."
@@ -2366,7 +2366,7 @@
 
 #: fff_plugin.py:1856
 msgid "Info"
-msgstr ""
+msgstr "Info"
 
 #: fff_plugin.py:1903 fff_plugin.py:1913
 msgid "Error Updating Metadata"
@@ -2446,7 +2446,7 @@
 
 #: fff_plugin.py:2114
 msgid "Downloading from %s"
-msgstr ""
+msgstr "Ladda ner från %s"
 
 #: fff_plugin.py:2117
 msgid "Proceed with updating your library?"
@@ -2631,7 +2631,7 @@
 
 #: prefs.py:79
 msgid "Yes, Always"
-msgstr ""
+msgstr "Ja, alltid"
 
 #: prefs.py:81
 msgid "Yes, if EPUB has a cover image"
@@ -2647,4 +2647,4 @@
 
 #: prefs.py:87
 msgid "No"
-msgstr ""
+msgstr "Nej"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/fanficfare/adapters/__init__.py 
new/FanFicFare-4.52.0/fanficfare/adapters/__init__.py
--- old/FanFicFare-4.51.0/fanficfare/adapters/__init__.py       2025-11-07 
16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/adapters/__init__.py       2025-12-01 
15:25:22.000000000 +0100
@@ -106,7 +106,6 @@
 from . import adapter_trekfanfictionnet
 from . import adapter_wwwutopiastoriescom
 from . import adapter_sinfuldreamscomunicornfic
-from . import adapter_sinfuldreamscomwhisperedmuse
 from . import adapter_sinfuldreamscomwickedtemptation
 from . import adapter_asianfanficscom
 from . import adapter_mttjustoncenet
@@ -119,7 +118,6 @@
 from . import adapter_wattpadcom
 from . import adapter_novelonlinefullcom
 from . import adapter_wwwnovelallcom
-from . import adapter_wuxiaworldxyz
 from . import adapter_hentaifoundrycom
 from . import adapter_mugglenetfanfictioncom
 from . import adapter_swiorgru
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.51.0/fanficfare/adapters/adapter_fanfictionsfr.py 
new/FanFicFare-4.52.0/fanficfare/adapters/adapter_fanfictionsfr.py
--- old/FanFicFare-4.51.0/fanficfare/adapters/adapter_fanfictionsfr.py  
2025-11-07 16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/adapters/adapter_fanfictionsfr.py  
2025-12-01 15:25:22.000000000 +0100
@@ -52,11 +52,11 @@
         self.story.setMetadata('storyId', story_id)
         fandom_name = match.group('fandom')
 
-        self._setURL('https://www.%s/fanfictions/%s/%s/chapters.html' % 
(self.getSiteDomain(), fandom_name, story_id))
+        self._setURL('https://%s/fanfictions/%s/%s/chapters.html' % 
(self.getSiteDomain(), fandom_name, story_id))
 
     @staticmethod
     def getSiteDomain():
-        return 'fanfictions.fr'
+        return 'www.fanfictions.fr'
 
     @classmethod
     def getSiteExampleURLs(cls):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.51.0/fanficfare/adapters/adapter_fictionlive.py 
new/FanFicFare-4.52.0/fanficfare/adapters/adapter_fictionlive.py
--- old/FanFicFare-4.51.0/fanficfare/adapters/adapter_fictionlive.py    
2025-11-07 16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/adapters/adapter_fictionlive.py    
2025-12-01 15:25:22.000000000 +0100
@@ -222,6 +222,8 @@
         ## api url to get content of a multi route chapter. requires only the 
route id and no timestamps
         route_chunkrange_url = 
"https://fiction.live/api/anonkun/route/{c_id}/chapters";
 
+        self.chapter_id_to_api = {}
+
         def add_chapter_url(title, bounds):
             "Adds a chapter url based on the start/end chunk-range timestamps."
             start, end = bounds
@@ -239,6 +241,17 @@
             a, b = itertools.tee(iterable, 2)
             next(b, None)
             return list(zip(a, b))
+        
+        def map_chap_ids_to_api(chapter_ids, route_ids, times):
+            for index, bounds in enumerate(times):
+                start, end = bounds
+                end -= 1
+                chapter_url = chunkrange_url.format(s_id = data['_id'], start 
= start, end = end)
+                self.chapter_id_to_api[chapter_ids[index]] = chapter_url
+            
+            for route_id in route_ids:
+                chapter_url = route_chunkrange_url.format(c_id = route_id)
+                self.chapter_id_to_api[route_id] = chapter_url
 
         ## first thing to do is seperate out the appendices
         appendices, maintext, routes = [], [], []
@@ -260,22 +273,25 @@
         ## main-text chapter extraction processing. *should* now handle all 
the edge cases.
         ## relies on fanficfare ignoring empty chapters!
 
-        titles = [c['title'] for c in maintext]
-        titles = ["Home"] + titles
+        titles = ["Home"] + [c['title'] for c in maintext]
+        chapter_ids = ['home'] + [c['id'] for c in maintext]
+        times = [data['ct']] + [c['ct'] for c in maintext] + 
[self.most_recent_chunk + 2] # need to be 1 over, and add_url etc does -1
+        times = pair(times)
+
+        if self.getConfig('include_appendices', True): # Add appendices after 
main text if desired
+            titles = titles + ["Appendix: " + a['title'][9:] for a in 
appendices]
+            chapter_ids = chapter_ids + [a['id'] for a in appendices]
+            times = times + [(a['ct'], a['ct'] + 2) for a in appendices]
 
-        times = [c['ct'] for c in maintext]
-        times = [data['ct']] + times + [self.most_recent_chunk + 2] # need to 
be 1 over, and add_url etc does -1
+        route_ids = [r['id'] for r in routes]
 
-        # doesn't actually run without the call to list.
-        list(map(add_chapter_url, titles, pair(times)))
+        map_chap_ids_to_api(chapter_ids, route_ids, times) # Map chapter ids 
to API URLs for use when comparing the two
 
-        for a in appendices: # add appendices afterwards
-            chapter_start = a['ct']
-            chapter_title = "Appendix: " + a['title'][9:] # 'Appendix: ' 
rather than '#special' at beginning of name
-            add_chapter_url(chapter_title, (chapter_start, chapter_start + 2)) 
# 1 msec range = this one chunk only
+        # doesn't actually run without the call to list.
+        list(map(add_chapter_url, titles, times))
 
         for r in routes:  # add route at the end, after appendices
-            route_id = r['id']  # to get route chapter content, the route id 
is needed, not the timestamp
+            route_id = r['id'] # to get route chapter content, the route id is 
needed, not the timestamp
             chapter_title = "Route: " + r['title']  # 'Route: ' at beginning 
of name, since it's a multiroute chapter
             add_route_chapter_url(chapter_title, route_id)
 
@@ -531,6 +547,35 @@
 
         return output
 
+    def normalize_chapterurl(self, url):
+        if url.startswith(r'https://fiction.live/api/anonkun/chapters'):
+            return url
+        
+        pattern = None
+
+        if url.startswith(r'https://fiction.live/api/anonkun/route'):
+            pattern = 
r"https?://(?:beta\.)?fiction\.live/[^/]*/[^/]*/[a-zA-Z0-9]+/routes/([a-zA-Z0-9]+)"
+        elif url.startswith(r'https://fiction.live/'):
+            pattern = 
r"https?://(?:beta\.)?fiction\.live/[^/]*/[^/]*/[a-zA-Z0-9]+/[^/]*(/[a-zA-Z0-9]+|home)"
+            # regex101 rocks
+
+        if not pattern:
+            return url
+
+        match = re.match(pattern, url)
+        if not match:
+            return url
+        
+        chapter_id = match.group(1)
+
+        if chapter_id.startswith('/'):
+            chapter_id = chapter_id[1:]
+
+        if chapter_id and chapter_id in self.chapter_id_to_api:
+            return self.chapter_id_to_api[chapter_id]
+        
+        return url
+    
     def format_unknown(self, chunk):
         raise NotImplementedError("Unknown chunk type ({}) in fiction.live 
story.".format(chunk))
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.51.0/fanficfare/adapters/adapter_literotica.py 
new/FanFicFare-4.52.0/fanficfare/adapters/adapter_literotica.py
--- old/FanFicFare-4.51.0/fanficfare/adapters/adapter_literotica.py     
2025-11-07 16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/adapters/adapter_literotica.py     
2025-12-01 15:25:22.000000000 +0100
@@ -95,6 +95,49 @@
         self.story.setMetadata('storyId',self.parsedUrl.path.split('/',)[-1])
         # logger.debug("language:%s"%self.story.getMetadata('language'))
 
+    ## apply clean_chapter_titles
+    def add_chapter(self,chapter_title,url,othermeta={}):
+        if self.getConfig("clean_chapter_titles"):
+            storytitle = self.story.getMetadata('title').lower()
+            chapter_name_type = None
+            # strip trailing ch or pt before doing the chapter clean.
+            # doesn't remove from story title metadata
+            storytitle = re.sub(r'^(.*?)( (ch|pt))?$',r'\1',storytitle)
+            if chapter_title.lower().startswith(storytitle):
+                chapter = chapter_title[len(storytitle):].strip()
+                # logger.debug('\tChapter: "%s"' % chapter)
+                if chapter == '':
+                    chapter_title = 'Chapter %d' % (self.num_chapters() + 1)
+                    # Sometimes the first chapter does not have type of chapter
+                    if self.num_chapters() == 0:
+                        # logger.debug('\tChapter: first chapter without 
chapter type')
+                        chapter_name_type = None
+                else:
+                    separater_char = chapter[0]
+                    # logger.debug('\tseparater_char: "%s"' % separater_char)
+                    chapter = chapter[1:].strip() if separater_char in [":", 
"-"] else chapter
+                    # logger.debug('\tChapter: "%s"' % chapter)
+                    if chapter.lower().startswith('ch.'):
+                        chapter = chapter[len('ch.'):].strip()
+                        try:
+                            chapter_title = 'Chapter %d' % int(chapter)
+                        except:
+                            chapter_title = 'Chapter %s' % chapter
+                        chapter_name_type = 'Chapter' if chapter_name_type is 
None else chapter_name_type
+                        # logger.debug('\tChapter: chapter_name_type="%s"' % 
chapter_name_type)
+                    elif chapter.lower().startswith('pt.'):
+                        chapter = chapter[len('pt.'):].strip()
+                        try:
+                            chapter_title = 'Part %d' % int(chapter)
+                        except:
+                            chapter_title = 'Part %s' % chapter
+                        chapter_name_type = 'Part' if chapter_name_type is 
None else chapter_name_type
+                        # logger.debug('\tChapter: chapter_name_type="%s"' % 
chapter_name_type)
+                    elif separater_char in [":", "-"]:
+                        chapter_title = chapter
+                        # logger.debug('\tChapter: taking chapter text as 
whole')
+        super(LiteroticaSiteAdapter, 
self).add_chapter(chapter_title,url,othermeta)
+
     def extractChapterUrlsAndMetadata(self):
         """
         In April 2024, site introduced significant changes, including
@@ -182,12 +225,14 @@
         else: # if all else fails
             self.story.setMetadata('authorId', stripHTML(authora))
 
-        if soup.select('div#tabpanel-tags'):
-            # logger.debug("tags1")
-            self.story.extendList('eroticatags', [ stripHTML(t).title() for t 
in soup.select('div#tabpanel-tags a.av_as') ])
-        if soup.select('div[class^="_widget__tags_"]'):
-            # logger.debug("tags2")
-            self.story.extendList('eroticatags', [ stripHTML(t).title() for t 
in soup.select('div[class^="_widget__tags_"] a[class^="_tags__link_"]') ])
+        ## Collect tags from series/story page if tags_from_chapters is enabled
+        if self.getConfig("tags_from_chapters"):
+            if soup.select('div#tabpanel-tags'):
+                # logger.debug("tags1")
+                self.story.extendList('eroticatags', [ stripHTML(t).title() 
for t in soup.select('div#tabpanel-tags a.av_as') ])
+            if soup.select('div[class^="_widget__tags_"]'):
+                # logger.debug("tags2")
+                self.story.extendList('eroticatags', [ stripHTML(t).title() 
for t in soup.select('div[class^="_widget__tags_"] a[class^="_tags__link_"]') ])
         # logger.debug(self.story.getList('eroticatags'))
 
         ## look first for 'Series Introduction', then Info panel short desc
@@ -250,7 +295,8 @@
             ## Multi-chapter stories.  AKA multi-part 'Story Series'.
             bn_antags = soup.select('div#tabpanel-info p.bn_an')
             # logger.debug(bn_antags)
-            if bn_antags:
+            if bn_antags and not self.getConfig("dates_from_chapters"):
+                ## Use dates from series metadata unless dates_from_chapters 
is enabled
                 dates = []
                 for datetag in bn_antags[:2]:
                     datetxt = stripHTML(datetag)
@@ -272,52 +318,11 @@
             ## category from chapter list
             self.story.extendList('category',[ stripHTML(t) for t in 
soup.select('a.br_rl') ])
 
-            storytitle = self.story.getMetadata('title').lower()
-            chapter_name_type = None
             for chapteratag in soup.select('a.br_rj'):
                 chapter_title = stripHTML(chapteratag)
                 # logger.debug('\tChapter: "%s"' % chapteratag)
-                if self.getConfig("clean_chapter_titles"):
-                    # strip trailing ch or pt before doing the chapter clean.
-                    # doesn't remove from story title metadata
-                    storytitle = re.sub(r'^(.*?)( (ch|pt))?$',r'\1',storytitle)
-                    if chapter_title.lower().startswith(storytitle):
-                        chapter = chapter_title[len(storytitle):].strip()
-                        # logger.debug('\tChapter: "%s"' % chapter)
-                        if chapter == '':
-                            chapter_title = 'Chapter %d' % 
(self.num_chapters() + 1)
-                            # Sometimes the first chapter does not have type 
of chapter
-                            if self.num_chapters() == 0:
-                                # logger.debug('\tChapter: first chapter 
without chapter type')
-                                chapter_name_type = None
-                        else:
-                            separater_char = chapter[0]
-                            # logger.debug('\tseparater_char: "%s"' % 
separater_char)
-                            chapter = chapter[1:].strip() if separater_char in 
[":", "-"] else chapter
-                            # logger.debug('\tChapter: "%s"' % chapter)
-                            if chapter.lower().startswith('ch.'):
-                                chapter = chapter[len('ch.'):].strip()
-                                try:
-                                    chapter_title = 'Chapter %d' % int(chapter)
-                                except:
-                                    chapter_title = 'Chapter %s' % chapter
-                                chapter_name_type = 'Chapter' if 
chapter_name_type is None else chapter_name_type
-                                # logger.debug('\tChapter: 
chapter_name_type="%s"' % chapter_name_type)
-                            elif chapter.lower().startswith('pt.'):
-                                chapter = chapter[len('pt.'):].strip()
-                                try:
-                                    chapter_title = 'Part %d' % int(chapter)
-                                except:
-                                    chapter_title = 'Part %s' % chapter
-                                chapter_name_type = 'Part' if 
chapter_name_type is None else chapter_name_type
-                                # logger.debug('\tChapter: 
chapter_name_type="%s"' % chapter_name_type)
-                            elif separater_char in [":", "-"]:
-                                chapter_title = chapter
-                                # logger.debug('\tChapter: taking chapter text 
as whole')
-
                 # /series/se does include full URLs current.
                 chapurl = chapteratag['href']
-
                 # logger.debug("Chapter URL: " + chapurl)
                 self.add_chapter(chapter_title, chapurl)
 
@@ -327,6 +332,7 @@
                 self.setCoverImage(self.url,coverimg['src'])
 
         #### Attempting averrating from JS metadata.
+        #### also alternate chapters from json
         try:
             state_start="state='"
             state_end="'</script>"
@@ -345,10 +351,43 @@
                     ## series
                     elif 'series' in json_state:
                         all_rates = [ float(x['rate_all']) for x in 
json_state['series']['works'] ]
+
+                        ## Extract dates from chapter approval dates if 
dates_from_chapters is enabled
+                        if self.getConfig("dates_from_chapters"):
+                            date_approvals = []
+                            for work in json_state['series']['works']:
+                                if 'date_approve' in work:
+                                    try:
+                                        
date_approvals.append(makeDate(work['date_approve'], self.dateformat))
+                                    except:
+                                        pass
+                            if date_approvals:
+                                # Oldest date is published, newest is updated
+                                date_approvals.sort()
+                                self.story.setMetadata('datePublished', 
date_approvals[0])
+                                self.story.setMetadata('dateUpdated', 
date_approvals[-1])
                     if all_rates:
                         self.story.setMetadata('averrating', '%4.2f' % 
(sum(all_rates) / float(len(all_rates))))
+
+                    ## alternate chapters from JSON
+                    if self.num_chapters() < 1:
+                        logger.debug("Getting Chapters from series JSON")
+                        seriesid = 
json_state.get('series',{}).get('coversSeriesId',None)
+                        if seriesid:
+                            logger.info("Fetching chapter data from JSON")
+                            logger.debug(seriesid)
+                            series_json = 
json.loads(self.get_request('https://literotica.com/api/3/series/%s/works'%seriesid))
+                            # logger.debug(json.dumps(series_json, 
sort_keys=True,indent=2, separators=(',', ':')))
+                            for chap in series_json:
+                                self.add_chapter(chap['title'], 
'https://www.literotica.com/s/'+chap['url'])
+
+                                ## Collect tags from series/story page if 
tags_from_chapters is enabled
+                                if self.getConfig("tags_from_chapters"):
+                                    self.story.extendList('eroticatags', [ 
stripHTML(t['tag']).title() for t in chap['tags'] ])
+
+
         except Exception as e:
-            logger.debug("Processing JSON to find averrating failed. (%s)"%e)
+            logger.warning("Processing JSON failed. (%s)"%e)
 
         ## Features removed because not supportable by new site form:
         ## averrating metadata entry
@@ -507,7 +546,7 @@
         import json
         last_page = int(js_story_list.group('last_page'))
         current_page = int(js_story_list.group('current_page')) + 1
-        # Fetching the remaining urls from api. Can't trust the number given 
about the pages left from a website. Sometimes even the api returns outdated 
number of pages. 
+        # Fetching the remaining urls from api. Can't trust the number given 
about the pages left from a website. Sometimes even the api returns outdated 
number of pages.
         while current_page <= last_page:
             i = len(urls)
             logger.debug("Pages %s/%s"%(current_page, int(last_page)))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.51.0/fanficfare/adapters/adapter_royalroadcom.py 
new/FanFicFare-4.52.0/fanficfare/adapters/adapter_royalroadcom.py
--- old/FanFicFare-4.51.0/fanficfare/adapters/adapter_royalroadcom.py   
2025-11-07 16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/adapters/adapter_royalroadcom.py   
2025-12-01 15:25:22.000000000 +0100
@@ -267,6 +267,8 @@
                 self.story.setMetadata('status', 'Stub')
             elif 'DROPPED' == label:
                 self.story.setMetadata('status', 'Dropped')
+            elif 'INACTIVE' == label:
+                self.story.setMetadata('status', 'Inactive')
             elif 'Fan Fiction' == label:
                 self.story.addToList('category', 'FanFiction')
             elif 'Original' == label:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.51.0/fanficfare/adapters/adapter_sinfuldreamscomwhisperedmuse.py
 
new/FanFicFare-4.52.0/fanficfare/adapters/adapter_sinfuldreamscomwhisperedmuse.py
--- 
old/FanFicFare-4.51.0/fanficfare/adapters/adapter_sinfuldreamscomwhisperedmuse.py
   2025-11-07 16:53:24.000000000 +0100
+++ 
new/FanFicFare-4.52.0/fanficfare/adapters/adapter_sinfuldreamscomwhisperedmuse.py
   1970-01-01 01:00:00.000000000 +0100
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2011 Fanficdownloader team, 2018 FanFicFare team
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Software: eFiction
-from __future__ import absolute_import
-from .base_efiction_adapter import BaseEfictionAdapter
-
-class SinfulDreamsComWhisperedMuse(BaseEfictionAdapter):
-
-    @staticmethod
-    def getSiteDomain():
-        return 'sinful-dreams.com'
-
-    @classmethod
-    def getPathToArchive(self):
-        return '/whispered/muse'
-
-    @classmethod
-    def getConfigSection(cls):
-        "Overriden because [domain/path] section for multiple-adapter domain."
-        return cls.getSiteDomain()+cls.getPathToArchive()
-
-    @classmethod
-    def getSiteAbbrev(self):
-        return 'snfldrms-wm'
-
-    @classmethod
-    def getDateFormat(self):
-        return "%m/%d/%Y"
-
-def getClass():
-    return SinfulDreamsComWhisperedMuse
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.51.0/fanficfare/adapters/adapter_wuxiaworldxyz.py 
new/FanFicFare-4.52.0/fanficfare/adapters/adapter_wuxiaworldxyz.py
--- old/FanFicFare-4.51.0/fanficfare/adapters/adapter_wuxiaworldxyz.py  
2025-11-07 16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/adapters/adapter_wuxiaworldxyz.py  
1970-01-01 01:00:00.000000000 +0100
@@ -1,171 +0,0 @@
-#  -*- coding: utf-8 -*-
-
-# Copyright 2020 FanFicFare team
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-from __future__ import absolute_import
-import logging
-import re
-# py2 vs py3 transition
-from ..six.moves.urllib import parse as urlparse
-
-from .base_adapter import BaseSiteAdapter, makeDate
-from fanficfare.htmlcleanup import stripHTML
-from .. import exceptions as exceptions
-
-logger = logging.getLogger(__name__)
-
-
-def getClass():
-    return WuxiaWorldXyzSiteAdapter
-
-
-class WuxiaWorldXyzSiteAdapter(BaseSiteAdapter):
-    DATE_FORMAT = '%Y-%m-%d %H:%M'
-
-    def __init__(self, config, url):
-        BaseSiteAdapter.__init__(self, config, url)
-        self.story.setMetadata('siteabbrev', 'wuxco')
-
-        # get storyId from url--url validation guarantees query correct
-        match = re.match(self.getSiteURLPattern(), url)
-        if not match:
-            raise exceptions.InvalidStoryURL(url, self.getSiteDomain(), 
self.getSiteExampleURLs())
-
-        story_id = match.group('id')
-        self.story.setMetadata('storyId', story_id)
-        self._setURL('https://%s/%s/' % (self.getSiteDomain(), story_id))
-
-    @staticmethod
-    def getSiteDomain():
-        return 'www.wuxiaworld.xyz'
-
-    @classmethod
-    def getAcceptDomains(cls):
-        return 
['www.wuxiaworld.xyz','m.wuxiaworld.xyz','www.wuxiaworld.co','m.wuxiaworld.co']
-
-    @classmethod
-    def getConfigSections(cls):
-        "Only needs to be overriden if has additional ini sections."
-        return cls.getAcceptDomains()
-
-    @classmethod
-    def getSiteExampleURLs(cls):
-        return 'https://%s/story-name' % cls.getSiteDomain()
-
-    def getSiteURLPattern(self):
-        return r'https?://(www|m)\.wuxiaworld\.(xyz|co)/(?P<id>[^/]+)(/)?'
-
-    def extractChapterUrlsAndMetadata(self):
-        logger.debug('URL: %s', self.url)
-
-        data = self.get_request(self.url)
-
-        soup = self.make_soup(data)
-
-        self.setCoverImage(self.url, soup.select_one('img.cover')['src'])
-
-        author = soup.select_one('div.info div span').get_text()
-        self.story.setMetadata('title', soup.select_one('h3.title').get_text())
-        self.story.setMetadata('author', author)
-        self.story.setMetadata('authorId', author)
-        ## site doesn't have authorUrl links.
-
-        ## getting status
-        status_label = soup.find('h3',string='Status:')
-        status = stripHTML(status_label.nextSibling)
-        if status == 'Completed':
-            self.story.setMetadata('status', 'Completed')
-        else:
-            self.story.setMetadata('status', 'In-Progress')
-
-        ### No dates given now?
-        # chapter_info = soup.select_one('.chapter-wrapper')
-        # date = makeDate(chapter_info.select_one('.update-time').get_text(), 
self.DATE_FORMAT)
-        # if date:
-        #     self.story.setMetadata('dateUpdated', date)
-
-        intro = soup.select_one('div.desc-text')
-        if intro.strong:
-            intro.strong.decompose()
-        self.setDescription(self.url, intro)
-
-        def get_chapters(toc_soup):
-            chapter_info = toc_soup.select_one('div#list-chapter')
-            return [ ch for ch in chapter_info.select('li span ~ a')
-                     if not (ch.has_attr('style') and 'color:Gray;' not in 
ch('style')) ]
-
-        ## skip grayed out "In preparation" chapters -- couldn't make
-        ## the :not() work in the same select.
-        chapters = get_chapters(soup)
-
-        next_toc_page_url = soup.select_one('li.next a')
-        while next_toc_page_url:
-            logger.debug("TOC list next page: %s"%next_toc_page_url['href'])
-            toc_soup = self.make_soup(self.get_request('https://%s%s' % 
(self.getSiteDomain(),next_toc_page_url['href'])))
-            chapters.extend(get_chapters(toc_soup))
-            next_toc_page_url = toc_soup.select_one('li.next a')
-
-        if self.getConfig("dedup_order_chapter_list",False):
-            # Sort and deduplicate chapters (some stories in incorrect order 
and/or duplicates)
-            chapters_data = []
-            numbers_regex = re.compile(r'[^0-9\.]') # Everything except 
decimal and numbers
-            for ch in chapters:
-                chapter_title = stripHTML(ch)
-                chapter_url = ch['href']
-                if chapter_title.startswith('Chapter'):
-                    target_number = chapter_title.split()[1]
-                else:
-                    target_number = chapter_title.split()[0]
-                try:
-                    number = float(re.sub(numbers_regex, '', target_number))
-                except:
-                    continue # Cannot parse chapter number
-                chapters_data.append((number, chapter_title, chapter_url))
-
-            chapters_data.sort(key=lambda ch: ch[0])
-
-            for index, chapter in enumerate(chapters_data):
-                if index > 0:
-                    # No previous duplicate chapter names or same chapter 
numbers
-                    if chapter[1] == chapters_data[index-1][1] or chapter[0] 
== chapters_data[index-1][0]:
-                        continue
-                title = chapter[1]
-                url = urlparse.urljoin(self.url, chapter[2])
-                self.add_chapter(title, url)
-        else:
-            ## normal operation
-            for ch in chapters:
-                self.add_chapter(stripHTML(ch), urlparse.urljoin(self.url, 
ch['href']))
-
-    def getChapterText(self, url):
-        logger.debug('Getting chapter text from: %s', url)
-        data = self.get_request(url)
-        soup = self.make_soup(data)
-
-        content = soup.select_one('div#chapter-content')
-
-        ## Remove
-        # <div align="left">
-        #     If you find any errors ( broken links, non-standard content, 
etc.. ), Please let
-        #     us know
-        #     &lt; report chapter &gt; so we can fix it as soon as possible.
-        # </div>
-        report_div = content.select_one('div:last-child')
-        if 'broken links, non-standard content, etc' in stripHTML(report_div):
-            report_div.decompose()
-
-        return self.utf8FromSoup(url, content)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/fanficfare/cli.py 
new/FanFicFare-4.52.0/fanficfare/cli.py
--- old/FanFicFare-4.51.0/fanficfare/cli.py     2025-11-07 16:53:24.000000000 
+0100
+++ new/FanFicFare-4.52.0/fanficfare/cli.py     2025-12-01 15:25:22.000000000 
+0100
@@ -28,7 +28,7 @@
 import os, sys, platform
 
 
-version="4.51.0"
+version="4.52.0"
 os.environ['CURRENT_VERSION_ID']=version
 
 global_cache = 'global_cache'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/fanficfare/configurable.py 
new/FanFicFare-4.52.0/fanficfare/configurable.py
--- old/FanFicFare-4.51.0/fanficfare/configurable.py    2025-11-07 
16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/configurable.py    2025-12-01 
15:25:22.000000000 +0100
@@ -95,6 +95,7 @@
     'numChapters':'Chapters',
     'numWords':'Words',
     'words_added':'Words Added', # logpage only
+    'marked_new_chapters':'Chapters Marked New',
     'site':'Site',
     'publisher':'Publisher',
     'storyId':'Story ID',
@@ -239,6 +240,8 @@
                'skip_author_cover':(ffnet_list,None,boollist),
                'try_shortened_title_urls':(['fanfiction.net'],None,boollist),
 
+               'include_appendices':(['fiction.live'],None,boollist),
+
                'fix_fimf_blockquotes':(['fimfiction.net'],None,boollist),
                
'keep_prequel_in_description':(['fimfiction.net'],None,boollist),
                
'scrape_bookshelf':(['fimfiction.net'],None,boollist+['legacy']),
@@ -266,6 +269,8 @@
                'description_in_chapter':(['literotica.com'],None,boollist),
                'fetch_stories_from_api':(['literotica.com'],None,boollist),
                'order_chapters_by_date':(['literotica.com'],None,boollist),
+               'tags_from_chapters':(['literotica.com'],None,boollist),
+               'dates_from_chapters':(['literotica.com'],None,boollist),
 
                
'inject_chapter_title':(['asianfanfics.com']+wpc_list,None,boollist),
                'inject_chapter_image':(['asianfanfics.com'],None,boollist),
@@ -343,6 +348,7 @@
                  'numChapters',
                  'numWords',
                  'words_added', # logpage only.
+                 'marked_new_chapters',
                  'site',
                  'publisher',
                  'storyId',
@@ -520,6 +526,8 @@
                  'description_in_chapter',
                  'order_chapters_by_date',
                  'fetch_stories_from_api',
+                 'tags_from_chapters',
+                 'dates_from_chapters',
                  'inject_chapter_title',
                  'inject_chapter_image',
                  'append_datepublished_to_storyurl',
@@ -605,6 +613,7 @@
                  'fix_excess_space',
                  'dedup_order_chapter_list',
                  'ignore_chapter_url_list',
+                 'include_appendices',
                  'dedup_chapter_list',
                  'show_timestamps',
                  'show_nsfw_cover_images',
@@ -1084,7 +1093,8 @@
             if self.getConfig('use_flaresolverr_proxy',False):
                 
logger.debug("use_flaresolverr_proxy:%s"%self.getConfig('use_flaresolverr_proxy'))
                 fetchcls = fetcher_flaresolverr_proxy.FlareSolverr_ProxyFetcher
-                if (self.getConfig('use_flaresolverr_proxy') != 'withimages' 
and
+                if (self.getConfig('include_images') and
+                    self.getConfig('use_flaresolverr_proxy') != 'withimages' 
and
                     self.getConfig('use_flaresolverr_proxy') != 
'directimages') and not self.getConfig('use_browser_cache'):
                     logger.warning("FlareSolverr v2+ doesn't work with images: 
include_images automatically set false")
                     logger.warning("Set use_flaresolverr_proxy:withimages if 
your are using FlareSolver v1 and want images")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/fanficfare/defaults.ini 
new/FanFicFare-4.52.0/fanficfare/defaults.ini
--- old/FanFicFare-4.51.0/fanficfare/defaults.ini       2025-11-07 
16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/defaults.ini       2025-12-01 
15:25:22.000000000 +0100
@@ -483,6 +483,13 @@
 ## mark_new_chapters can be true, false or latestonly
 mark_new_chapters:false
 
+## The count of how many chapters are marked '(new)' will be in
+## metadata entry marked_new_chapters
+marked_new_chapters_label:Chapters Marked New
+
+# Add comma separators for numeric reads. Eg 10000 becomes 10,000
+add_to_comma_entries:,marked_new_chapters
+
 ## chapter title patterns use python template substitution.  The
 ## ${number} is the 'chapter' number and ${title} is the chapter
 ## title, after applying chapter_title_strip_pattern.  ${index04} is
@@ -2061,6 +2068,9 @@
 reader_input_label:Reader Input
 keep_in_order_tags:true
 
+# Choose whether to include Appendix chapters
+include_appendices:true
+
 add_to_keep_html_attrs:,style
 
 add_to_output_css:
@@ -2156,6 +2166,9 @@
 ## threadmarks_per_page than other XF2 sites.
 threadmarks_per_page:400
 
+## forum.questionablequesting.com shows more posts per reader page than other 
XF2 sites.
+reader_posts_per_page:30
+
 [forums.spacebattles.com]
 ## see [base_xenforoforum]
 
@@ -2282,6 +2295,22 @@
 ## or just use the text. If this can't be done, the full title is used.
 clean_chapter_titles: false
 
+## For stories, collect tags from individual chapter pages in addition to the
+## series page tags. This allows collection of tags beyond the top 10 on the 
series but 
+## if the author updates tags on a chapter and not the series, those tags may 
persist even if
+## the chapter is not fetched during an update.
+## Default is false to maintain previous behavior.
+tags_from_chapters: false
+
+## For multi-chapter stories (series), use the chapter approval dates for 
datePublished
+## and dateUpdated instead of the series metadata dates. This provides more 
accurate dates
+## based on actual posting dates rather than just when the series metadata 
changes. This
+## method can provide wildly different dates if chapters were written long 
before being
+## approved, if chapters are approved out of order, or if the works were 
approved/updated
+## before literotica's current series system was implemented.
+## Default is false to maintain previous behavior.
+dates_from_chapters: false
+
 ## Some stories mistakenly include 'Ch' or 'Pt' at the end of the
 ## story title. Appears to be a site bug or common author error.  Copy
 ## these to your personal.ini (and uncomment) to correct.
@@ -4481,11 +4510,3 @@
 
 website_encodings:Windows-1252,utf8
 
-[www.wuxiaworld.xyz]
-use_basic_cache:true
-## Was wuxiaworld.co
-## Note that wuxiaworld.co != wuxiaworld.com
-## When dedup_order_chapter_list:true, use a heuristic algorithm
-## specific to wuxiaworld.xyz order and dedup chapters.
-dedup_order_chapter_list:false
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/fanficfare/epubutils.py 
new/FanFicFare-4.52.0/fanficfare/epubutils.py
--- old/FanFicFare-4.51.0/fanficfare/epubutils.py       2025-11-07 
16:53:24.000000000 +0100
+++ new/FanFicFare-4.52.0/fanficfare/epubutils.py       2025-12-01 
15:25:22.000000000 +0100
@@ -380,6 +380,7 @@
                 tag = soup.find('meta',{'name':'chaptertoctitle'})
                 if tag:
                     chaptertoctitle = tag['content']
+                else:
                     chaptertoctitle = chapterorigtitle
 
                 chaptertitle = None
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/fanficfare/story.py 
new/FanFicFare-4.52.0/fanficfare/story.py
--- old/FanFicFare-4.51.0/fanficfare/story.py   2025-11-07 16:53:24.000000000 
+0100
+++ new/FanFicFare-4.52.0/fanficfare/story.py   2025-12-01 15:25:22.000000000 
+0100
@@ -1056,7 +1056,11 @@
 
     def getChapterCount(self):
         ## returns chapter count adjusted for start-end range.
-        url_chapters = value = 
int(self.getMetadata("numChapters").replace(',',''))
+        value = 0
+        try:
+            url_chapters = value = 
int(self.getMetadata("numChapters").replace(',',''))
+        except:
+            logger.warning("Failed to get number of chapters--no chapters 
recorded by adapter")
         if self.chapter_first:
             value = url_chapters - (int(self.chapter_first) - 1)
         if self.chapter_last:
@@ -1479,9 +1483,11 @@
         newtempl = string.Template(newpattern)
         toctempl = string.Template(tocpattern)
 
+        marked_new_chapters = 0
         for index, chap in enumerate(self.chapters):
             if chap['new'] or self.getMetadata('newforanthology'):
                 usetempl = newtempl
+                marked_new_chapters += 1
             else:
                 usetempl = templ
             # logger.debug("chap(%s)"%chap)
@@ -1501,6 +1507,8 @@
             ## chapter['html'] is a string.
             chapter['html'] = 
self.do_chapter_text_replacements(chapter['html'])
             retval.append(chapter)
+        if marked_new_chapters:
+            self.setMetadata('marked_new_chapters',marked_new_chapters)
         return retval
 
     def do_chapter_text_replacements(self,data):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.51.0/pyproject.toml 
new/FanFicFare-4.52.0/pyproject.toml
--- old/FanFicFare-4.51.0/pyproject.toml        2025-11-07 16:53:24.000000000 
+0100
+++ new/FanFicFare-4.52.0/pyproject.toml        2025-12-01 15:25:22.000000000 
+0100
@@ -16,7 +16,7 @@
 #
 # For a discussion on single-sourcing the version, see
 # https://packaging.python.org/guides/single-sourcing-package-version/
-version = "4.51.0"
+version = "4.52.0"
 
 # This is a one-line description or tagline of what your project does. This
 # corresponds to the "Summary" metadata field:

Reply via email to