Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package python-fanficfare for
openSUSE:Factory checked in at 2022-08-07 18:34:06
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
and /work/SRC/openSUSE:Factory/.python-fanficfare.new.1521 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-fanficfare"
Sun Aug 7 18:34:06 2022 rev:41 rq:993599 version:4.14.3
Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes
2022-07-13 13:45:57.094079787 +0200
+++
/work/SRC/openSUSE:Factory/.python-fanficfare.new.1521/python-fanficfare.changes
2022-08-07 18:34:14.337206777 +0200
@@ -1,0 +2,8 @@
+Sun Aug 7 06:36:18 UTC 2022 - Matej Cepl <[email protected]>
+
+- Update to 4.14.3:
+ - Update translations.
+ - Remove site: webnovel.com See #843
+ - Fix for qt6 vs qt5 in Calibre6 and personal.ini search.
+
+-------------------------------------------------------------------
Old:
----
FanFicFare-4.14.0.tar.gz
New:
----
FanFicFare-4.14.3.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.KiSaF5/_old 2022-08-07 18:34:14.829208208 +0200
+++ /var/tmp/diff_new_pack.KiSaF5/_new 2022-08-07 18:34:14.833208219 +0200
@@ -21,7 +21,7 @@
%define skip_python2 1
%{?!python_module:%define python_module() python-%{**} python3-%{**}}
Name: python-fanficfare
-Version: 4.14.0
+Version: 4.14.3
Release: 0
Summary: Tool for making eBooks from stories on fanfiction and other
web sites
License: GPL-3.0-only
++++++ FanFicFare-4.14.0.tar.gz -> FanFicFare-4.14.3.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/calibre-plugin/__init__.py
new/FanFicFare-4.14.3/calibre-plugin/__init__.py
--- old/FanFicFare-4.14.0/calibre-plugin/__init__.py 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/calibre-plugin/__init__.py 2022-07-15
17:26:07.000000000 +0200
@@ -33,7 +33,7 @@
from calibre.customize import InterfaceActionBase
# pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 14, 0)
+__version__ = (4, 14, 3)
## Apparently the name for this class doesn't matter--it was still
## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/calibre-plugin/dialogs.py
new/FanFicFare-4.14.3/calibre-plugin/dialogs.py
--- old/FanFicFare-4.14.0/calibre-plugin/dialogs.py 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/calibre-plugin/dialogs.py 2022-07-15
17:26:07.000000000 +0200
@@ -22,13 +22,17 @@
QGridLayout, QPushButton, QFont, QLabel, QCheckBox,
QIcon,
QLineEdit, QComboBox, QProgressDialog, QTimer,
QDialogButtonBox,
QScrollArea, QPixmap, Qt, QAbstractItemView, QTextEdit,
- pyqtSignal, QGroupBox, QFrame)
+ pyqtSignal, QGroupBox, QFrame, QTextCursor)
try:
# qt6 Calibre v6+
QTextEditNoWrap = QTextEdit.LineWrapMode.NoWrap
+ MoveOperations = QTextCursor.MoveOperation
+ MoveMode = QTextCursor.MoveMode
except:
# qt5 Calibre v2-5
QTextEditNoWrap = QTextEdit.NoWrap
+ MoveOperations = QTextCursor
+ MoveMode = QTextCursor
from calibre.gui2 import gprefs
show_download_options = 'fff:add new/update dialogs:show_download_options'
@@ -1454,7 +1458,7 @@
else:
# Make the next search start from the begining again
self.lastStart = 0
- self.textedit.moveCursor(self.textedit.textCursor().Start)
+ self.textedit.moveCursor(MoveOperations.Start)
def moveCursor(self,start,end):
@@ -1466,7 +1470,8 @@
# Next we move the Cursor by over the match and pass the KeepAnchor
parameter
# which will make the cursor select the match's text
- cursor.movePosition(cursor.Right,cursor.KeepAnchor,end - start)
+ cursor.movePosition(MoveOperations.Right,
+ MoveMode.KeepAnchor,end - start)
# And finally we set this new cursor as the parent's
self.textedit.setTextCursor(cursor)
@@ -1480,10 +1485,10 @@
cursor.setPosition(0)
# Next we move the Cursor down lineno times
- cursor.movePosition(cursor.Down,cursor.MoveAnchor,lineno-1)
+ cursor.movePosition(MoveOperations.Down,MoveMode.MoveAnchor,lineno-1)
# Next we move the Cursor to the end of the line
- cursor.movePosition(cursor.EndOfLine,cursor.KeepAnchor,1)
+ cursor.movePosition(MoveOperations.EndOfLine,MoveMode.KeepAnchor,1)
# And finally we set this new cursor as the parent's
self.textedit.setTextCursor(cursor)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/calibre-plugin/plugin-defaults.ini
new/FanFicFare-4.14.3/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-4.14.0/calibre-plugin/plugin-defaults.ini 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/calibre-plugin/plugin-defaults.ini 2022-07-15
17:26:07.000000000 +0200
@@ -3766,32 +3766,6 @@
website_encodings:Windows-1252,utf8
-[www.webnovel.com]
-use_basic_cache:true
-## Extra metadata that this adapter knows about. See [archiveofourown.org]
-## for examples of how to use them.
-extra_valid_entries:translator, editor, sitetags
-translator_label: Translator
-editor_label: Editor
-sitetags_label:Site Tags
-extra_titlepage_entries: translator, editor, sitetags
-
-## webnovel.com is, as a whole, utf-8. There are even utf-8 encoded
-## Chinese characters in the HTML *comments*. However, I've seen at
-## least one story that still managed to have Simplified Chinese
-## encoding in it. But none of the SC encodings I tried worked; I
-## suspect because of the utf-8 comments in the HTML.
-#chardet_confidence_limit:0.9
-#website_encodings:auto,utf8,Windows-1252
-
-## Attempt to fix pseudo HTML found in some stories, that causes text to
-## seemingly disappear. In most cases this should work without any unintended
-## side-effects.
-fix_pseudo_html:false
-
-## Clear FanFiction from defaults, site is original fiction.
-extratags:
-
[www.whofic.com]
use_basic_cache:true
website_encodings:Windows-1252,utf8
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/calibre-plugin/translations/fr.po
new/FanFicFare-4.14.3/calibre-plugin/translations/fr.po
--- old/FanFicFare-4.14.0/calibre-plugin/translations/fr.po 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/calibre-plugin/translations/fr.po 2022-07-15
17:26:07.000000000 +0200
@@ -19,7 +19,7 @@
"Project-Id-Version: calibre-plugins\n"
"POT-Creation-Date: 2022-07-06 11:14-0500\n"
"PO-Revision-Date: 2014-06-19 22:55+0000\n"
-"Last-Translator: Jorg722, 2022\n"
+"Last-Translator: Ptit Prince <[email protected]>, 2016-2017,2019-2022\n"
"Language-Team: French
(http://www.transifex.com/calibre/calibre-plugins/language/fr/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -2393,7 +2393,7 @@
#: fff_plugin.py:2850
msgid "Existing Book Update Failed"
-msgstr ""
+msgstr "La mise à jour du livre existant a échoué"
#: fff_plugin.py:2851
msgid ""
@@ -2401,7 +2401,7 @@
"Story URL: %s<br>\n"
"Error: %s<br>\n"
"The previously downloaded book is still in the anthology, but FFF doesn't
have the metadata to fill this field.\n"
-msgstr ""
+msgstr "Un livre préexistant de cette anthologie a échoué à trouver des
métadonnées.<br>\nURL de l'histoire : %s<br>\nErreur : %s<br>\nLe livre
précédemment téléchargé est toujours dans l'anthologie, mais FFF ne
dispose pas des métadonnées nécessaires pour remplir ce champ.\n"
#: fff_plugin.py:2945
msgid "Anthology containing:"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/calibre-plugin/translations/ja.po
new/FanFicFare-4.14.3/calibre-plugin/translations/ja.po
--- old/FanFicFare-4.14.0/calibre-plugin/translations/ja.po 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/calibre-plugin/translations/ja.po 2022-07-15
17:26:07.000000000 +0200
@@ -2383,7 +2383,7 @@
#: fff_plugin.py:2850
msgid "Existing Book Update Failed"
-msgstr ""
+msgstr "??????????????????????????????????????????"
#: fff_plugin.py:2851
msgid ""
@@ -2391,7 +2391,7 @@
"Story URL: %s<br>\n"
"Error: %s<br>\n"
"The previously downloaded book is still in the anthology, but FFF doesn't
have the metadata to fill this field.\n"
-msgstr ""
+msgstr
"???????????????????????????????????????????????????????????????????????????<br>\n??????????????????URL:
%s<br>\n?????????:
%s<br>\n?????????????????????????????????????????????????????????????????????FFF???????????????????????????????????????????????????????????????\n"
#: fff_plugin.py:2945
msgid "Anthology containing:"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/fanficfare/adapters/__init__.py
new/FanFicFare-4.14.3/fanficfare/adapters/__init__.py
--- old/FanFicFare-4.14.0/fanficfare/adapters/__init__.py 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/fanficfare/adapters/__init__.py 2022-07-15
17:26:07.000000000 +0200
@@ -125,7 +125,6 @@
from . import adapter_sinfuldreamscomwhisperedmuse
from . import adapter_sinfuldreamscomwickedtemptation
from . import adapter_asianfanficscom
-from . import adapter_webnovelcom
from . import adapter_mttjustoncenet
from . import adapter_narutoficorg
from . import adapter_starskyhutcharchivenet
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.14.0/fanficfare/adapters/adapter_webnovelcom.py
new/FanFicFare-4.14.3/fanficfare/adapters/adapter_webnovelcom.py
--- old/FanFicFare-4.14.0/fanficfare/adapters/adapter_webnovelcom.py
2022-07-11 19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/fanficfare/adapters/adapter_webnovelcom.py
1970-01-01 01:00:00.000000000 +0100
@@ -1,262 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014 Fanficdownloader team, 2018 FanFicFare team
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Adapted by GComyn on April 16, 2017
-from __future__ import absolute_import
-try:
- # python3
- from html import escape
-except ImportError:
- # python2
- from cgi import escape
-import difflib
-import json
-import logging
-import re
-import time
-# py2 vs py3 transition
-from ..six import text_type as unicode
-
-from .base_adapter import BaseSiteAdapter
-from .. import exceptions as exceptions
-from ..htmlcleanup import stripHTML
-from ..dateutils import parse_relative_date_string
-
-logger = logging.getLogger(__name__)
-
-def getClass():
- return WWWWebNovelComAdapter
-
-class WWWWebNovelComAdapter(BaseSiteAdapter):
- _GET_VIP_CONTENT_DELAY = 8
-
- def __init__(self, config, url):
- BaseSiteAdapter.__init__(self, config, url)
- # get storyId from url
- # https://www.webnovel.com/book/6831837102000205
-
- m = re.match(self.getSiteURLPattern(),url)
- if m:
- self.story.setMetadata('storyId',m.group('id'))
- # normalized story URL.
- title = m.group('title') or ""
- self._setURL('https://' + self.getSiteDomain() + '/book/' + title
+ self.story.getMetadata('storyId'))
- else:
- raise exceptions.InvalidStoryURL(url,
- self.getSiteDomain(),
- self.getSiteExampleURLs())
-
-
- # Each adapter needs to have a unique site abbreviation.
- self.story.setMetadata('siteabbrev', 'wncom')
-
- self._csrf_token = None
-
- @staticmethod # must be @staticmethod, don't remove it.
- def getSiteDomain():
- # The site domain. Does have www here, if it uses it.
- return 'www.webnovel.com'
-
- @classmethod
- def getSiteExampleURLs(cls):
- return 'https://' + cls.getSiteDomain() +
'/book/story-title_123456789012345 https://' + cls.getSiteDomain() +
'/book/123456789012345'
-
- def getSiteURLPattern(self):
- #
https://www.webnovel.com/book/game-of-thrones%3A-the-prideful-one._17509790806343405
- return r'https://' + re.escape(self.getSiteDomain()) +
r'/book/(?P<title>.*?_)?(?P<id>\d+)'
-
- # Getting the chapter list and the meta data, plus 'is adult' checking.
- def doExtractChapterUrlsAndMetadata(self, get_cover=True):
- url = self.story.getMetadata('storyUrl') #self.url
- logger.debug(url)
-
- data = self.get_request(url)
- # logger.debug("\n"+data)
-
- if 'We might have some troubles to find out this page.' in data:
- raise exceptions.StoryDoesNotExist('{0} says: "" for url
"{1}"'.format(self.getSiteDomain(), self.url))
-
- soup = self.make_soup(data)
- # removing all of the scripts
- for tag in soup.findAll('script') + soup.find_all('svg'):
- tag.extract()
-
-
- # This is the block that holds the metadata
- bookdetails = soup.find('div', {'class': '_8'})
-
- # Title
- title = bookdetails.find('h1')
- # done as a loop incase there isn't one, or more than one.
- for tag in title.find_all('small'):
- tag.extract()
- self.story.setMetadata('title', stripHTML(title))
-
- detail_txt = stripHTML(bookdetails.find('h2', {'class':
'det-hd-detail'}))
- if "Completed" in detail_txt:
- self.story.setMetadata('status', 'Completed')
- else:
- self.story.setMetadata('status', 'In-Progress')
-
- meta_tag = soup.find('address')
- meta_txt = stripHTML(meta_tag)
-
- def parse_meta(mt,label,setmd):
- if label in mt:
- data = mt.split(label,1)[1].split('Translator:',
1)[0].split('Editor:', 1)[0].strip()
- if data:
- # print("setting %s to %s"%(setmd, data))
- self.story.setMetadata(setmd, data)
-
- # Author details. Name, id, url...
- autdet = bookdetails.find('a', {'class': re.compile('c_primary')})
- if autdet:
- self.story.setMetadata('author',stripHTML(autdet))
- self.story.setMetadata('authorId',re.search(r"/([0-9]+)",
autdet.get("href")).group(1))
- self.story.setMetadata('authorUrl', "https://www.webnovel.com" +
autdet.get("href"))
- else:
- parse_meta(meta_txt,'Author:','author')
- self.story.setMetadata('authorId',
self.story.getMetadata('author'))
- # There is no authorUrl for this story, so I'm setting it to the
story url
- # otherwise it defaults to the file location
- self.story.setMetadata('authorUrl', url)
- parse_meta(meta_txt,'Translator:','translator')
- parse_meta(meta_txt,'Editor:','editor')
-
- cattags = soup.find('div',{'class':'_mn'})
- if cattags:
- cats = cattags.find_all('a',href=re.compile(r'/category/'))
- self.story.extendList('category',[stripHTML(cat) for cat in cats])
-
- poptags = soup.find('div',{'class':'m-tags'})
- if poptags:
- sitetags = poptags.find_all('a',href=re.compile(r'/tags/'))
- self.story.extendList('sitetags',[sitetag.string.replace("# ","")
for sitetag in sitetags])
-
- ## get _csrfToken cookie for chapter list fetch
- for cookie in self.get_configuration().get_cookiejar():
- if cookie.name == '_csrfToken':
- self._csrf_token = csrf_token = cookie.value
- break
- else:
- raise exceptions.FailedToDownload('csrf token could not be found')
-
- ## get chapters from a json API url.
- jsondata = json.loads(self.get_request(
- "https://" + self.getSiteDomain() +
"/go/pcm/chapter/get-chapter-list?_csrfToken=" + csrf_token + "&bookId=" +
self.story.getMetadata(
- 'storyId') + "&pageIndex=0"))
-
- # logger.debug(json.dumps(jsondata, sort_keys=True,
- # indent=2, separators=(',', ':')))
- for volume in jsondata["data"]["volumeItems"]:
- for chap in volume["chapterItems"]:
- # Only allow free and VIP type 1 chapters
- if chap['isAuth'] not in [1]: # Ad wall indicator
- # seems to have changed
- # --JM
- continue
- chap_title = 'Chapter ' + unicode(self.num_chapters()+1) + ' -
' + chap['chapterName']
- chap_Url = url.rstrip('/') + '/' + chap['chapterId']
- self.add_chapter(chap_title, chap_Url,
- {'volumeName':volume['volumeName'],
- 'volumeId':volume['volumeId'],
- ## dates are months or years ago for
- ## so many chapters I judge this
- ## worthless.
- #
'publishTimeFormat':chap['publishTimeFormat'],
- #
'date':parse_relative_date_string(chap['publishTimeFormat']).strftime(self.getConfig("datethreadmark_format",
- #
self.getConfig("dateCreated_format","%Y-%m-%d %H:%M:%S"))),
- })
-
-
- if get_cover:
- cover_elements = soup.find('div', {'class': '_4'}).find_all('img')
- image_sources = [ x['src'] for x in cover_elements ]
- largest_image_size = max([
re.search(r'\/([\d]{3})\/([\d]{3})\.([a-z]{3})', x).group(1) for x in
image_sources ])
- for source in image_sources:
- if re.search(r'\/{0}\/{0}\.'.format(largest_image_size),
source):
- cover_url = 'https:' + source
- self.setCoverImage(url, cover_url)
-
- detabt = soup.find('div', {'class': 'det-abt'})
- synopsis = detabt.find('p')
- self.setDescription(url, synopsis)
- rating = detabt.find('span',{'class': 'vam'})
- if rating:
- self.story.setMetadata('rating',rating.string)
-
- last_updated_string =
jsondata['data']['lastChapterItem']['publishTimeFormat']
- last_updated = parse_relative_date_string(last_updated_string)
-
- # Published date is always unknown, so simply don't set it
- # self.story.setMetadata('datePublished', UNIX_EPOCHE)
- self.story.setMetadata('dateUpdated', last_updated)
-
- # grab the text for an individual chapter.
- def getChapterText(self, url):
- logger.debug('Getting chapter text from: %s' % url)
-
- data = self.get_request(url)
- # soup = self.make_soup(data)
-
- save_chapter_soup = self.make_soup('<div class="story"></div>')
- save_chapter=save_chapter_soup.find('div')
-
- def append_tag(elem,tag,string=None,classes=None):
- '''bs4 requires tags be added separately.'''
- new_tag = save_chapter_soup.new_tag(tag)
- if string:
- new_tag.string=string
- if classes:
- new_tag['class']=[classes]
- elem.append(new_tag)
- return new_tag
-
- # Chapter text is now apparently json encoded in a <script> tag.
- # This seems to find it.
- start_marker="var chapInfo= "
- end_marker=";g_data.chapInfo=chapInfo"
- data = data[data.index(start_marker)+len(start_marker):]
- data = data[:data.index(end_marker)]
-
- # unescape a bunch of stuff that json lib chokes on.
- data = re.sub(r"\\([ /<>'&])",r"\1",data)
- # logger.debug("\n"+data)
-
- ch_json = json.loads(data)
- # logger.debug(json.dumps(ch_json, sort_keys=True,
- # indent=2, separators=(',', ':')))
-
- for paragraph in ch_json["chapterInfo"]["contents"]:
- p = paragraph["content"]
- # logger.debug(p)
- ## sometimes wrapped in <p>, sometimes not. Treat as html
- ## if starts with <p>
- if p.startswith('<p>'):
- p = self.make_soup(p)
- ## make_soup--html5lib/bs really--adds a full html tag
- ## set like:
- # "<html><head></head><body> ... </body></html>"
- ## but utf8FromSoup will remove them all. Noted here
- ## because it looks odd in debug.
- # logger.debug(p)
- save_chapter.append(p)
- else:
- append_tag(save_chapter,'p',p)
-
- # logger.debug(save_chapter)
- return self.utf8FromSoup(url,save_chapter)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/fanficfare/cli.py
new/FanFicFare-4.14.3/fanficfare/cli.py
--- old/FanFicFare-4.14.0/fanficfare/cli.py 2022-07-11 19:28:24.000000000
+0200
+++ new/FanFicFare-4.14.3/fanficfare/cli.py 2022-07-15 17:26:07.000000000
+0200
@@ -28,7 +28,7 @@
import os, sys, platform
-version="4.14.0"
+version="4.14.3"
os.environ['CURRENT_VERSION_ID']=version
global_cache = 'global_cache'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/fanficfare/configurable.py
new/FanFicFare-4.14.3/fanficfare/configurable.py
--- old/FanFicFare-4.14.0/fanficfare/configurable.py 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/fanficfare/configurable.py 2022-07-15
17:26:07.000000000 +0200
@@ -292,7 +292,6 @@
'use_threadmarks_cover':(base_xenforo2_list,None,boollist),
'skip_sticky_first_posts':(base_xenforo2_list,None,boollist),
'include_dice_rolls':(base_xenforo2_list,None,boollist+['svg']),
- 'fix_pseudo_html': (['webnovel.com'], None, boollist),
'fix_excess_space': (['novelonlinefull.com', 'novelall.com'],
['epub', 'html'], boollist),
'dedup_order_chapter_list': (['m.wuxiaworld.co',
'novelupdates.cc'], None, boollist),
'show_nsfw_cover_images': (['fiction.live'], None, boollist),
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/fanficfare/defaults.ini
new/FanFicFare-4.14.3/fanficfare/defaults.ini
--- old/FanFicFare-4.14.0/fanficfare/defaults.ini 2022-07-11
19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/fanficfare/defaults.ini 2022-07-15
17:26:07.000000000 +0200
@@ -3770,32 +3770,6 @@
website_encodings:Windows-1252,utf8
-[www.webnovel.com]
-use_basic_cache:true
-## Extra metadata that this adapter knows about. See [archiveofourown.org]
-## for examples of how to use them.
-extra_valid_entries:translator, editor, sitetags
-translator_label: Translator
-editor_label: Editor
-sitetags_label:Site Tags
-extra_titlepage_entries: translator, editor, sitetags
-
-## webnovel.com is, as a whole, utf-8. There are even utf-8 encoded
-## Chinese characters in the HTML *comments*. However, I've seen at
-## least one story that still managed to have Simplified Chinese
-## encoding in it. But none of the SC encodings I tried worked; I
-## suspect because of the utf-8 comments in the HTML.
-#chardet_confidence_limit:0.9
-#website_encodings:auto,utf8,Windows-1252
-
-## Attempt to fix pseudo HTML found in some stories, that causes text to
-## seemingly disappear. In most cases this should work without any unintended
-## side-effects.
-fix_pseudo_html:false
-
-## Clear FanFiction from defaults, site is original fiction.
-extratags:
-
[www.whofic.com]
use_basic_cache:true
website_encodings:Windows-1252,utf8
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.14.0/setup.py
new/FanFicFare-4.14.3/setup.py
--- old/FanFicFare-4.14.0/setup.py 2022-07-11 19:28:24.000000000 +0200
+++ new/FanFicFare-4.14.3/setup.py 2022-07-15 17:26:07.000000000 +0200
@@ -26,7 +26,7 @@
name=package_name,
# Versions should comply with PEP440.
- version="4.14.0",
+ version="4.14.3",
description='A tool for downloading fanfiction to eBook formats',
long_description=long_description,