Hello community,

here is the log from the commit of package python-diskcache for 
openSUSE:Factory checked in at 2019-03-12 09:53:46
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-diskcache (Old)
 and      /work/SRC/openSUSE:Factory/.python-diskcache.new.28833 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-diskcache"

Tue Mar 12 09:53:46 2019 rev:2 rq:682845 version:3.1.1

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-diskcache/python-diskcache.changes    2018-06-29 22:23:12.614627539 +0200
+++ /work/SRC/openSUSE:Factory/.python-diskcache.new.28833/python-diskcache.changes    2019-03-12 09:53:50.883532877 +0100
@@ -1,0 +2,7 @@
+Fri Mar  8 13:19:48 UTC 2019 - Tomáš Chvátal <[email protected]>
+
+- Update to 3.1.1:
+  * Fix various timeouts
+  * Assorted small fixes
+
+-------------------------------------------------------------------

Old:
----
  diskcache-3.0.6.tar.gz

New:
----
  diskcache-3.1.1.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-diskcache.spec ++++++
--- /var/tmp/diff_new_pack.ebYXLr/_old  2019-03-12 09:53:51.439532767 +0100
+++ /var/tmp/diff_new_pack.ebYXLr/_new  2019-03-12 09:53:51.439532767 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package python-diskcache
 #
-# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -12,13 +12,13 @@
 # license that conforms to the Open Source Definition (Version 1.9)
 # published by the Open Source Initiative.
 
-# Please submit bugfixes or comments via http://bugs.opensuse.org/
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
 #
 
 
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-diskcache
-Version:        3.0.6
+Version:        3.1.1
 Release:        0
 Summary:        Disk and file backed cache
 License:        Apache-2.0
@@ -31,7 +31,6 @@
 BuildRequires:  %{python_module setuptools}
 BuildRequires:  fdupes
 BuildRequires:  python-rpm-macros
-# Django1 is not available for python3 in distribution
 BuildRequires:  python2-Django1
 BuildArch:      noarch
 %python_subpackages
@@ -53,9 +52,9 @@
 %python_expand %fdupes %{buildroot}%{$python_sitelib}
 
 %check
-python2 %{_bindir}/nosetests
+nosetests-%{python_bin_suffix}
 # Django tests support only Django1 which is not in distribution for python3
-python3 %{_bindir}/nosetests --exclude test_djangocache.py
+nosetests-%{python3_bin_suffix} --exclude test_djangocache.py
 
 %files %{python_files}
 %license LICENSE

++++++ diskcache-3.0.6.tar.gz -> diskcache-3.1.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/PKG-INFO new/diskcache-3.1.1/PKG-INFO
--- old/diskcache-3.0.6/PKG-INFO        2018-04-20 01:46:38.000000000 +0200
+++ new/diskcache-3.1.1/PKG-INFO        2018-11-22 19:51:41.000000000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: diskcache
-Version: 3.0.6
+Version: 3.1.1
 Summary: Disk and file backed cache.
 Home-page: http://www.grantjenks.com/docs/diskcache/
 Author: Grant Jenks
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/diskcache/__init__.py new/diskcache-3.1.1/diskcache/__init__.py
--- old/diskcache-3.0.6/diskcache/__init__.py   2018-04-20 01:40:15.000000000 +0200
+++ new/diskcache-3.1.1/diskcache/__init__.py   2018-11-22 19:44:35.000000000 +0100
@@ -29,8 +29,8 @@
 
 
 __title__ = 'diskcache'
-__version__ = '3.0.6'
-__build__ = 0x030006
+__version__ = '3.1.1'
+__build__ = 0x030101
 __author__ = 'Grant Jenks'
 __license__ = 'Apache 2.0'
 __copyright__ = 'Copyright 2016-2018 Grant Jenks'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/diskcache/core.py new/diskcache-3.1.1/diskcache/core.py
--- old/diskcache-3.0.6/diskcache/core.py       2018-04-18 23:30:26.000000000 +0200
+++ new/diskcache-3.1.1/diskcache/core.py       2018-11-22 19:41:20.000000000 +0100
@@ -374,7 +374,7 @@
             raise ValueError('disk must subclass diskcache.Disk')
 
         self._directory = directory
-        self._timeout = 60    # Use 1 minute timeout for initialization.
+        self._timeout = 0  # Manually handle retries during initialization.
         self._local = threading.local()
 
         if not op.isdir(directory):
@@ -388,7 +388,7 @@
                         ' and could not be created' % self._directory
                     )
 
-        sql = self._sql
+        sql = self._sql_retry
 
         # Setup Settings table.
 
@@ -409,10 +409,8 @@
         # Chance to set pragmas before any tables are created.
 
         for key, value in sorted(sets.items()):
-            if not key.startswith('sqlite_'):
-                continue
-
-            self.reset(key, value, update=False)
+            if key.startswith('sqlite_'):
+                self.reset(key, value, update=False)
 
         sql('CREATE TABLE IF NOT EXISTS Settings ('
             ' key TEXT NOT NULL UNIQUE,'
@@ -537,7 +535,17 @@
 
 
     @property
-    def _sql(self):
+    def _con(self):
+        # Check process ID to support process forking. If the process
+        # ID changes, close the connection and update the process ID.
+
+        local_pid = getattr(self._local, 'pid', None)
+        pid = os.getpid()
+
+        if local_pid != pid:
+            self.close()
+            self._local.pid = pid
+
         con = getattr(self._local, 'con', None)
 
         if con is None:
@@ -547,9 +555,10 @@
                 isolation_level=None,
             )
 
-            # Some SQLite pragmas work on a per-connection basis so query the
-            # Settings table and reset the pragmas. The Settings table may not
-            # exist so catch and ignore the OperationalError that may occur.
+            # Some SQLite pragmas work on a per-connection basis so
+            # query the Settings table and reset the pragmas. The
+            # Settings table may not exist so catch and ignore the
+            # OperationalError that may occur.
 
             try:
                 select = 'SELECT key, value FROM Settings'
@@ -561,7 +570,40 @@
                     if key.startswith('sqlite_'):
                         self.reset(key, value, update=False)
 
-        return con.execute
+        return con
+
+
+    @property
+    def _sql(self):
+        return self._con.execute
+
+
+    @property
+    def _sql_retry(self):
+        sql = self._sql
+
+        # 2018-11-01 GrantJ - Some SQLite builds/versions handle
+        # the SQLITE_BUSY return value and connection parameter
+        # "timeout" differently. For a more reliable duration,
+        # manually retry the statement for 60 seconds. Only used
+        # by statements which modify the database and do not use
+        # a transaction (like those in ``__init__`` or ``reset``).
+        # See Issue #85 and tests/issue_85.py for more details.
+
+        def _execute_with_retry(statement, *args, **kwargs):
+            start = time.time()
+            while True:
+                try:
+                    return sql(statement, *args, **kwargs)
+                except sqlite3.OperationalError as exc:
+                    if str(exc) != 'database is locked':
+                        raise
+                    diff = time.time() - start
+                    if diff > 60:
+                        raise
+                    time.sleep(0.001)
+
+        return _execute_with_retry
 
 
     @cl.contextmanager
@@ -942,12 +984,9 @@
 
             try:
                 value = self._disk.fetch(mode, filename, db_value, read)
-            except IOError as error:
-                if error.errno == errno.ENOENT:
-                    # Key was deleted before we could retrieve result.
-                    return default
-                else:
-                    raise
+            except IOError:
+                # Key was deleted before we could retrieve result.
+                return default
 
         else:  # Slow path, transaction required.
             cache_hit = (
@@ -1853,51 +1892,56 @@
         :raises Timeout: if database timeout expires
 
         """
+        sql = self._sql
+        sql_retry = self._sql_retry
+
         if value is ENOVAL:
             select = 'SELECT value FROM Settings WHERE key = ?'
-            (value,), = self._sql(select, (key,)).fetchall()
+            (value,), = sql_retry(select, (key,)).fetchall()
             setattr(self, key, value)
             return value
-        else:
-            if update:
-                with self._transact() as (sql, _):
-                    statement = 'UPDATE Settings SET value = ? WHERE key = ?'
-                    sql(statement, (value, key))
-            else:
-                sql = self._sql
 
-            if key.startswith('sqlite_'):
-
-                # 2016-02-17 GrantJ - PRAGMA and autocommit_level=None
-                # don't always play nicely together. Retry setting the
-                # PRAGMA. I think some PRAGMA statements expect to
-                # immediately take an EXCLUSIVE lock on the database. I
-                # can't find any documentation for this but without the
-                # retry, stress will intermittently fail with multiple
-                # processes.
-
-                pause = 0.001
-                count = 60000  # 60 / 0.001
-                error = sqlite3.OperationalError
-                pragma = key[7:]
+        if update:
+            statement = 'UPDATE Settings SET value = ? WHERE key = ?'
+            sql_retry(statement, (value, key))
+
+        if key.startswith('sqlite_'):
+            pragma = key[7:]
+
+            # 2016-02-17 GrantJ - PRAGMA and isolation_level=None
+            # don't always play nicely together. Retry setting the
+            # PRAGMA. I think some PRAGMA statements expect to
+            # immediately take an EXCLUSIVE lock on the database. I
+            # can't find any documentation for this but without the
+            # retry, stress will intermittently fail with multiple
+            # processes.
+
+            # 2018-11-05 GrantJ - Avoid setting pragma values that
+            # are already set. Pragma settings like auto_vacuum and
+            # journal_mode can take a long time or may not work after
+            # tables have been created.
 
-                for _ in range(count):
+            start = time.time()
+            while True:
+                try:
                     try:
-                        args = pragma, value
-                        sql('PRAGMA %s = %s' % args).fetchall()
-                    except sqlite3.OperationalError as exc:
-                        error = exc
-                        time.sleep(pause)
-                    else:
-                        break
-                else:
-                    raise error
-
-                del error
-
-            elif key.startswith('disk_'):
-                attr = key[5:]
-                setattr(self._disk, attr, value)
+                        (old_value,), = sql('PRAGMA %s' % (pragma)).fetchall()
+                        update = old_value != value
+                    except ValueError:
+                        update = True
+                    if update:
+                        sql('PRAGMA %s = %s' % (pragma, value)).fetchall()
+                    break
+                except sqlite3.OperationalError as exc:
+                    if str(exc) != 'database is locked':
+                        raise
+                    diff = time.time() - start
+                    if diff > 60:
+                        raise
+                    time.sleep(0.001)
+        elif key.startswith('disk_'):
+            attr = key[5:]
+            setattr(self._disk, attr, value)
 
-            setattr(self, key, value)
-            return value
+        setattr(self, key, value)
+        return value
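
The new _sql_retry property above replaces the fixed 60-second connection timeout with a manual retry loop around the "database is locked" OperationalError. Below is a minimal standalone sketch of the same pattern against a plain sqlite3 connection; the execute_with_retry name and the example table are illustrative, not part of the upstream API.

    import sqlite3
    import time

    def execute_with_retry(con, statement, args=(), timeout=60.0, pause=0.001):
        # Retry statements that fail with "database is locked" until the
        # timeout expires; any other OperationalError is re-raised.
        start = time.time()
        while True:
            try:
                return con.execute(statement, args)
            except sqlite3.OperationalError as exc:
                if str(exc) != 'database is locked':
                    raise
                if time.time() - start > timeout:
                    raise
                time.sleep(pause)

    con = sqlite3.connect('example.db', isolation_level=None)
    execute_with_retry(con, 'CREATE TABLE IF NOT EXISTS Settings (key TEXT, value)')
    con.close()
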
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/diskcache/fanout.py new/diskcache-3.1.1/diskcache/fanout.py
--- old/diskcache-3.0.6/diskcache/fanout.py     2017-12-12 19:44:25.000000000 +0100
+++ new/diskcache-3.1.1/diskcache/fanout.py     2018-11-22 17:19:18.000000000 +0100
@@ -58,6 +58,9 @@
         When `read` is `True`, `value` should be a file-like object opened
         for reading in binary mode.
 
+        If database timeout occurs then fails silently unless `retry` is set to
+        `True` (default `False`).
+
         :param key: key for item
         :param value: value for item
         :param float expire: seconds until the key expires
@@ -84,6 +87,8 @@
     def __setitem__(self, key, value):
         """Set `key` and `value` item in cache.
 
+        Calls :func:`FanoutCache.set` internally with `retry` set to `True`.
+
         :param key: key for item
         :param value: value for item
 
@@ -190,6 +195,9 @@
             retry=False):
         """Retrieve value from cache. If `key` is missing, return `default`.
 
+        If database timeout occurs then returns `default` unless `retry` is set
+        to `True` (default `False`).
+
         :param key: key for item
         :param default: return value if key is missing (default None)
         :param bool read: if True, return file handle to value
@@ -220,6 +228,8 @@
     def __getitem__(self, key):
         """Return corresponding value for `key` from cache.
 
+        Calls :func:`FanoutCache.get` internally with `retry` set to `True`.
+
         :param key: key for item
         :return: value for item
         :raises KeyError: if key is not found
@@ -295,6 +305,9 @@
 
         Missing keys are ignored.
 
+        If database timeout occurs then fails silently unless `retry` is set to
+        `True` (default `False`).
+
         :param key: key for item
         :param bool retry: retry if database timeout expires (default False)
         :return: True if item was deleted
@@ -318,6 +331,8 @@
     def __delitem__(self, key):
         """Delete corresponding item for `key` from cache.
 
+        Calls :func:`FanoutCache.delete` internally with `retry` set to `True`.
+
         :param key: key for item
         :raises KeyError: if key is not found
 
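
The docstring additions above spell out the retry semantics: the explicit get/set/delete methods default to retry=False and fail silently (or return the default) on a database timeout, while the mapping operators call them with retry=True. A hedged usage sketch follows; the cache directory and values are arbitrary.

    from diskcache import FanoutCache

    cache = FanoutCache('/tmp/mycachedir', shards=4, timeout=0.010)

    # Explicit methods: a database timeout makes set() fail silently and
    # get() return the default, unless retry=True is passed.
    cache.set(b'key', b'value', retry=True)
    value = cache.get(b'key', default=None, retry=True)

    # Mapping operators retry internally, so they behave like a dict.
    cache[b'other'] = 42
    assert cache[b'other'] == 42
    del cache[b'other']

    cache.close()
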
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/diskcache.egg-info/PKG-INFO new/diskcache-3.1.1/diskcache.egg-info/PKG-INFO
--- old/diskcache-3.0.6/diskcache.egg-info/PKG-INFO     2018-04-20 01:46:38.000000000 +0200
+++ new/diskcache-3.1.1/diskcache.egg-info/PKG-INFO     2018-11-22 19:51:41.000000000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: diskcache
-Version: 3.0.6
+Version: 3.1.1
 Summary: Disk and file backed cache.
 Home-page: http://www.grantjenks.com/docs/diskcache/
 Author: Grant Jenks
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/docs/api.rst new/diskcache-3.1.1/docs/api.rst
--- old/diskcache-3.0.6/docs/api.rst    2017-12-15 02:52:06.000000000 +0100
+++ new/diskcache-3.1.1/docs/api.rst    2018-11-22 17:19:18.000000000 +0100
@@ -58,7 +58,7 @@
      pragma.
    * `sqlite_synchronous` (int) default 1, "NORMAL" - SQLite synchronous
      pragma.
-   * `disk_min_file_size` (int, in bytes) default one kilobyte - values with
+   * `disk_min_file_size` (int, in bytes) default 32 kilobytes - values with
      greater size are stored in files.
    * `disk_pickle_protocol` (int) default highest Pickle protocol - the Pickle
      protocol to use for data types that are not natively supported.
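
The documented disk_min_file_size default changes here from one kilobyte to 32 kilobytes, so values up to 32 KB are now kept in SQLite rather than in separate files. A short sketch of inspecting and adjusting the setting through reset(), as the tutorial does for other settings; the 16 KB value is an arbitrary example.

    from diskcache import Cache

    with Cache('/tmp/mycachedir') as cache:
        print(cache.disk_min_file_size)             # 32768 in 3.1.1
        cache.reset('disk_min_file_size', 2 ** 14)  # values larger than 16 KB go to files
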
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/docs/development.rst new/diskcache-3.1.1/docs/development.rst
--- old/diskcache-3.0.6/docs/development.rst    2017-08-07 23:39:51.000000000 +0200
+++ new/diskcache-3.1.1/docs/development.rst    2018-11-22 19:22:50.000000000 +0100
@@ -29,7 +29,7 @@
 
 #. Backend Compatibility
 
-   #. `Flask-Cache <https://pythonhosted.org/Flask-Cache/>`_
+   #. `Flask-Caching <https://flask-caching.readthedocs.io/>`_
    #. `Beaker <http://beaker.readthedocs.org/en/latest/>`_
    #. `dogpile.cache <http://dogpilecache.readthedocs.org/en/latest/>`_
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/docs/tutorial.rst new/diskcache-3.1.1/docs/tutorial.rst
--- old/diskcache-3.0.6/docs/tutorial.rst       2018-04-18 22:14:42.000000000 +0200
+++ new/diskcache-3.1.1/docs/tutorial.rst       2018-11-22 17:19:18.000000000 +0100
@@ -64,7 +64,7 @@
 -----
 
 The core of :doc:`DiskCache <index>` is :class:`diskcache.Cache` which
-represents a disk and file backed cache. As a Cache it supports a familiar
+represents a disk and file backed cache. As a Cache, it supports a familiar
 Python Mapping interface with additional cache and performance parameters.
 
     >>> from diskcache import Cache
@@ -77,16 +77,26 @@
 processes. In this way, they are also process-safe and support cross-process
 communication.
 
-When created, Cache objects open and maintain a file handle. As such, they do
-not survive process forking but they may be serialized using Pickle. Each
-thread that accesses a cache is also responsible for calling :meth:`close
-<diskcache.Cache.close>` on the cache. You can use a Cache reference in a
-`with` statement to safeguard calling :meth:`close <diskcache.Cache.close>`.
+Cache objects open and maintain one or more file handles. But unlike files, all
+Cache operations are atomic and Cache objects support process-forking and may
+be serialized using Pickle. Each thread that accesses a cache should also call
+:meth:`close <diskcache.Cache.close>` on the cache. Cache objects can be used
+in a `with` statement to safeguard calling :meth:`close
+<diskcache.Cache.close>`.
 
     >>> cache.close()
     >>> with Cache('/tmp/mycachedir') as reference:
     ...     pass
 
+Closed Cache objects will automatically re-open when accessed. But opening
+Cache objects is relatively slow, and since all operations are atomic, you can
+safely leave Cache objects open.
+
+    >>> cache[b'key'] = b'value'
+    >>> cache.close()
+    >>> cache.get(b'key')  # Automatically opens, but slower.
+    'value'
+
 Set an item, get a value, and delete a key using the usual operators:
 
     >>> cache = Cache('/tmp/mycachedir')
@@ -302,18 +312,22 @@
 
 Another parameter, `timeout`, sets a limit on how long to wait for database
 transactions. Transactions are used for every operation that writes to the
-database. The `timeout` parameter is also present on
-:class:`diskcache.Cache`. When a :exc:`diskcache.Timeout` error occurs in
-:class:`Cache <diskcache.Cache>` methods, the exception is raised to the
-caller. In contrast, :class:`FanoutCache <diskcache.FanoutCache>` catches
+database. When the timeout expires, a :exc:`diskcache.Timeout` error is raised
+internally. This `timeout` parameter is also present on
+:class:`diskcache.Cache`. When a :exc:`Timeout <diskcache.Timeout>` error
+occurs in :class:`Cache <diskcache.Cache>` methods, the exception is raised to
+the caller. In contrast, :class:`FanoutCache <diskcache.FanoutCache>` catches
 timeout errors and aborts the operation. As a result, :meth:`set
 <diskcache.FanoutCache.set>` and :meth:`delete <diskcache.FanoutCache.delete>`
 methods may silently fail. Most methods that handle :exc:`Timeout
 <diskcache.Timeout>` exceptions also include a `retry` keyword parameter
-(default ``False``) to automatically repeat attempts that
-timeout. :class:`FanoutCache <diskcache.FanoutCache>` will never raise a
-:exc:`Timeout <diskcache.Timeout>` exception. The default `timeout` is 0.010
-(10 milliseconds).
+(default ``False``) to automatically repeat attempts that timeout. The Mapping
+interface operators: :meth:`cache[key] <diskcache.FanoutCache.__getitem__>`,
+:meth:`cache[key] = value <diskcache.FanoutCache.__setitem__>`, and :meth:`del
+cache[key] <diskcache.FanoutCache.__delitem__>` automatically retry operations
+when :exc:`Timeout <diskcache.Timeout>` errors occur. :class:`FanoutCache
+<diskcache.FanoutCache>` will never raise a :exc:`Timeout <diskcache.Timeout>`
+exception. The default `timeout` is 0.010 (10 milliseconds).
 
     >>> from diskcache import FanoutCache
     >>> cache = FanoutCache('/tmp/mycachedir', shards=4, timeout=1)
@@ -519,7 +533,7 @@
     >>> cache.size_limit
     4000000000
     >>> cache.disk_min_file_size
-    1024
+    32768
     >>> cache.reset('cull_limit', 0)  # Disable automatic evictions.
     0
     >>> cache.set(b'key', 1.234)
@@ -534,7 +548,7 @@
 <tutorial-cache>`. Changing these values will update the unprefixed attribute
 on the :class:`Disk <diskcache.Disk>` object.
 
-* `disk_min_file_size`, default one kilobyte. The minimum size to store a value
+* `disk_min_file_size`, default 32 kilobytes. The minimum size to store a value
   in a file.
 * `disk_pickle_protocol`, default highest Pickle protocol. The Pickle protocol
   to use for data types that are not natively supported.
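
The revised tutorial text distinguishes how the two classes surface timeouts: Cache raises diskcache.Timeout to the caller, while FanoutCache catches it and aborts (or retries when retry=True is passed). A minimal sketch of handling the exception explicitly with Cache; the one-second timeout value is arbitrary.

    from diskcache import Cache, Timeout

    cache = Cache('/tmp/mycachedir', timeout=1)

    try:
        cache.set(b'key', b'value')
    except Timeout:
        # Another writer held the database longer than `timeout` seconds.
        pass

    cache.close()
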
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/tests/stress_test_core.py new/diskcache-3.1.1/tests/stress_test_core.py
--- old/diskcache-3.0.6/tests/stress_test_core.py       2017-08-12 23:48:00.000000000 +0200
+++ new/diskcache-3.1.1/tests/stress_test_core.py       2018-11-22 17:19:18.000000000 +0100
@@ -3,7 +3,7 @@
 from __future__ import print_function
 
 import collections as co
-from diskcache import Cache, UnknownFileWarning, EmptyDirWarning
+from diskcache import Cache, UnknownFileWarning, EmptyDirWarning, Timeout
 import multiprocessing as mp
 import os
 import random
@@ -124,19 +124,24 @@
 
 
 def worker(queue, eviction_policy, processes, threads):
-    timings = {'get': [], 'set': [], 'delete': []}
+    timings = co.defaultdict(list)
     cache = Cache('tmp', eviction_policy=eviction_policy)
 
     for index, (action, key, value) in enumerate(iter(queue.get, None)):
         start = time.time()
 
-        if action == 'set':
-            cache.set(key, value, expire=EXPIRE)
-        elif action == 'get':
-            result = cache.get(key)
+        try:
+            if action == 'set':
+                cache.set(key, value, expire=EXPIRE)
+            elif action == 'get':
+                result = cache.get(key)
+            else:
+                assert action == 'delete'
+                cache.delete(key)
+        except Timeout:
+            miss = True
         else:
-            assert action == 'delete'
-            cache.delete(key)
+            miss = False
 
         stop = time.time()
 
@@ -144,7 +149,10 @@
             assert result == value
 
         if index > WARMUP:
-            timings[action].append(stop - start)
+            delta = stop - start
+            timings[action].append(delta)
+            if miss:
+                timings[action + '-miss'].append(delta)
 
     queue.put(timings)
 
@@ -179,7 +187,7 @@
 
     stop = time.time()
 
-    timings = {'get': [], 'set': [], 'delete': [], 'self': (stop - start)}
+    timings = co.defaultdict(list)
 
     for thread_queue in thread_queues:
         data = thread_queue.get()
@@ -243,7 +251,7 @@
         warnings.simplefilter('ignore', category=EmptyDirWarning)
         cache.check()
 
-    timings = {'get': [], 'set': [], 'delete': [], 'self': 0.0}
+    timings = co.defaultdict(list)
 
     for num in range(processes):
         with open('output-%s.pkl' % num, 'rb') as reader:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/tests/test_core.py new/diskcache-3.1.1/tests/test_core.py
--- old/diskcache-3.0.6/tests/test_core.py      2017-12-15 02:53:31.000000000 +0100
+++ new/diskcache-3.1.1/tests/test_core.py      2018-11-22 17:19:18.000000000 +0100
@@ -178,25 +178,6 @@
 
 
 @setup_cache
-def test_pragma(cache):
-    local = mock.Mock()
-    con = mock.Mock()
-    execute = mock.Mock()
-    cursor = mock.Mock()
-    fetchall = mock.Mock()
-
-    local.con = con
-    con.execute = execute
-    execute.return_value = cursor
-    cursor.fetchall = fetchall
-    fetchall.side_effect = [sqlite3.OperationalError, None]
-
-    size = 2 ** 28
-
-    with mock.patch.object(cache, '_local', local):
-        assert cache.reset('sqlite_mmap_size', size) == size
-
-@setup_cache
 @nt.raises(sqlite3.OperationalError)
 def test_pragma_error(cache):
     local = mock.Mock()
@@ -205,6 +186,7 @@
     cursor = mock.Mock()
     fetchall = mock.Mock()
 
+    local.pid = os.getpid()
     local.con = con
     con.execute = execute
     execute.return_value = cursor
@@ -299,17 +281,6 @@
         cache[0]
 
 
[email protected](IOError)
-@setup_cache
-def test_get_keyerror5(cache):
-    func = mock.Mock(side_effect=IOError(errno.EACCES, ''))
-
-    cache[0] = b'abcd' * 2 ** 20
-
-    with mock.patch('diskcache.core.open', func):
-        cache[0]
-
-
 @setup_cache
 def test_read(cache):
     cache.set(0, b'abcd' * 2 ** 20)
@@ -354,6 +325,7 @@
     con = mock.Mock()
     execute = mock.Mock()
 
+    local.pid = os.getpid()
     local.con = con
     con.execute = execute
     execute.side_effect = sqlite3.OperationalError
@@ -839,63 +811,6 @@
 
 
 @setup_cache
-def test_multiple_threads(cache):
-    values = list(range(100))
-
-    cache[0] = 0
-    cache[1] = 1
-    cache[2] = 2
-
-    cache = dc.Cache('tmp')
-
-    def worker():
-        sets = list(values)
-        random.shuffle(sets)
-
-        with dc.Cache('tmp') as thread_cache:
-            for value in sets:
-                thread_cache[value] = value
-
-    threads = [threading.Thread(target=worker) for _ in range(10)]
-
-    for thread in threads:
-        thread.start()
-
-    for thread in threads:
-        thread.join()
-
-    for value in values:
-        assert cache[value] == value
-
-    assert len(cache.check()) == 0
-
-
-@setup_cache
-def test_thread_safe(cache):
-    values = list(range(100))
-
-    def worker():
-        with cache:
-            sets = list(values)
-            random.shuffle(sets)
-            for value in sets:
-                cache[value] = value
-
-    threads = [threading.Thread(target=worker) for _ in range(10)]
-
-    for thread in threads:
-        thread.start()
-
-    for thread in threads:
-        thread.join()
-
-    for value in values:
-        assert cache[value] == value
-
-    assert len(cache.check()) == 0
-
-
-@setup_cache
 def test_with(cache):
     with dc.Cache('tmp') as tmp:
         tmp[u'a'] = 0
@@ -935,36 +850,6 @@
     cache.check()
 
 
-def stress_add(cache, limit, results):
-    total = 0
-    for num in range(limit):
-        if cache.add(num, num):
-            total += 1
-            # Stop one thread from running ahead of others.
-            time.sleep(0.001)
-    results.append(total)
-
-
-@setup_cache
-def test_add_concurrent(cache):
-    results = co.deque()
-    limit = 1000
-
-    threads = [
-        threading.Thread(target=stress_add, args=(cache, limit, results))
-        for _ in range(16)
-    ]
-
-    for thread in threads:
-        thread.start()
-
-    for thread in threads:
-        thread.join()
-
-    assert sum(results) == limit
-    cache.check()
-
-
 @setup_cache
 @nt.raises(dc.Timeout)
 def test_add_timeout(cache):
@@ -972,6 +857,7 @@
     con = mock.Mock()
     execute = mock.Mock()
 
+    local.pid = os.getpid()
     local.con = con
     con.execute = execute
     execute.side_effect = sqlite3.OperationalError
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/diskcache-3.0.6/tests/test_fanout.py new/diskcache-3.1.1/tests/test_fanout.py
--- old/diskcache-3.0.6/tests/test_fanout.py    2017-12-14 19:09:59.000000000 +0100
+++ new/diskcache-3.1.1/tests/test_fanout.py    2018-11-22 17:19:18.000000000 +0100
@@ -2,6 +2,7 @@
 
 from __future__ import print_function
 
+import collections as co
 import errno
 import functools as ft
 import hashlib
@@ -32,13 +33,17 @@
 if sys.hexversion < 0x03000000:
     range = xrange
 
-def setup_cache(func):
+def setup_cache(func=None, **options):
+    if func is None:
+        return lambda func: setup_cache(func, **options)
+
     @ft.wraps(func)
     def wrapper():
         shutil.rmtree('tmp', ignore_errors=True)
-        with dc.FanoutCache('tmp') as cache:
+        with dc.FanoutCache('tmp', **options) as cache:
             func(cache)
         shutil.rmtree('tmp', ignore_errors=True)
+
     return wrapper
 
 
@@ -163,6 +168,36 @@
         assert cache.add(0, 0, retry=True)
 
 
+def stress_add(cache, limit, results):
+    total = 0
+    for num in range(limit):
+        if cache.add(num, num, retry=True):
+            total += 1
+            # Stop one thread from running ahead of others.
+            time.sleep(0.001)
+    results.append(total)
+
+
+@setup_cache(shards=1)
+def test_add_concurrent(cache):
+    results = co.deque()
+    limit = 1000
+
+    threads = [
+        threading.Thread(target=stress_add, args=(cache, limit, results))
+        for _ in range(16)
+    ]
+
+    for thread in threads:
+        thread.start()
+
+    for thread in threads:
+        thread.join()
+
+    assert sum(results) == limit
+    cache.check()
+
+
 @setup_cache
 def test_incr(cache):
     cache.incr('key', delta=3) == 3
@@ -207,27 +242,24 @@
         time.sleep(0.001)
 
 
-def test_incr_concurrent():
+@setup_cache(shards=1, timeout=0.001)
+def test_incr_concurrent(cache):
     count = 16
-    limit = 500
+    limit = 50
 
-    with dc.FanoutCache('tmp', timeout=0.001) as cache:
-        threads = [
-            threading.Thread(target=stress_incr, args=(cache, limit))
-            for _ in range(count)
-        ]
+    threads = [
+        threading.Thread(target=stress_incr, args=(cache, limit))
+        for _ in range(count)
+    ]
 
-        for thread in threads:
-            thread.start()
+    for thread in threads:
+        thread.start()
 
-        for thread in threads:
-            thread.join()
+    for thread in threads:
+        thread.join()
 
-    with dc.FanoutCache('tmp') as cache:
-        assert cache.get(b'key') == count * limit
-        cache.check()
-
-    shutil.rmtree('tmp', ignore_errors=True)
+    assert cache.get(b'key') == count * limit
+    cache.check()
 
 
 @setup_cache

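
The reworked setup_cache decorator in the test_fanout.py hunk above can be used both bare (@setup_cache) and with keyword options (@setup_cache(shards=1)). A standalone sketch of that decorator pattern with a generic fixture; the with_options name is illustrative.

    import functools as ft

    def with_options(func=None, **options):
        # Called bare: func is the decorated function.
        # Called with keyword options: return a decorator that re-enters
        # this function with those options bound.
        if func is None:
            return lambda func: with_options(func, **options)

        @ft.wraps(func)
        def wrapper():
            func(options)

        return wrapper

    @with_options
    def plain(options):
        print('plain', options)   # {}

    @with_options(shards=1, timeout=0.001)
    def tuned(options):
        print('tuned', options)   # {'shards': 1, 'timeout': 0.001}

    plain()
    tuned()
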
