changeset 634f8347fb75 in trytond:default
details: https://hg.tryton.org/trytond?cmd=changeset;node=634f8347fb75
description:
Use separate connection to query cache table
The table is a hot spot for contention, so we must minimize the duration
for which locks are held on it. At the start of a transaction the cache
may need to be synchronized, which locks all the rows in the table, so
the transaction used for this query should not be the main one, which
may last a long time. The same applies to the commit.
issue8447
review283591002
diffstat:
trytond/cache.py | 85 ++++++++++++++++++++++++++------------------
trytond/modules/__init__.py | 3 +
trytond/tests/test_cache.py | 13 ++++++
3 files changed, 66 insertions(+), 35 deletions(-)
diffs (158 lines):
diff -r 075d94d578c4 -r 634f8347fb75 trytond/cache.py
--- a/trytond/cache.py Mon Jul 08 17:10:34 2019 +0200
+++ b/trytond/cache.py Mon Jul 08 21:24:33 2019 +0200
@@ -165,8 +165,9 @@
@classmethod
def sync(cls, transaction):
- dbname = transaction.database.name
- if not _clear_timeout and transaction.database.has_channel():
+ database = transaction.database
+ dbname = database.name
+ if not _clear_timeout and database.has_channel():
with cls._listener_lock:
if dbname not in cls._listener:
cls._listener[dbname] = listener = threading.Thread(
@@ -175,12 +176,17 @@
return
if (datetime.now() - cls._clean_last).total_seconds() < _clear_timeout:
return
- with transaction.connection.cursor() as cursor:
- table = Table(cls._table)
- cursor.execute(*table.select(_cast(table.timestamp), table.name))
- timestamps = {}
- for timestamp, name in cursor.fetchall():
- timestamps[name] = timestamp
+ connection = database.get_connection(readonly=True, autocommit=True)
+ try:
+ with connection.cursor() as cursor:
+ table = Table(cls._table)
+ cursor.execute(*table.select(
+ _cast(table.timestamp), table.name))
+ timestamps = {}
+ for timestamp, name in cursor.fetchall():
+ timestamps[name] = timestamp
+ finally:
+ database.put_connection(connection)
for name, timestamp in timestamps.items():
try:
inst = cls._instances[name]
@@ -197,39 +203,48 @@
reset = cls._reset.setdefault(transaction, set())
if not reset:
return
- dbname = transaction.database.name
- with transaction.connection.cursor() as cursor:
- if not _clear_timeout and transaction.database.has_channel():
+ database = transaction.database
+ dbname = database.name
+ if not _clear_timeout and transaction.database.has_channel():
+ with transaction.connection.cursor() as cursor:
cursor.execute(
'NOTIFY "%s", %%s' % cls._channel,
(json.dumps(list(reset), separators=(',', ':')),))
- else:
- for name in reset:
- cursor.execute(*table.select(table.name,
- where=table.name == name,
- limit=1))
- if cursor.fetchone():
- # It would be better to insert only
- cursor.execute(*table.update([table.timestamp],
- [CurrentTimestamp()],
+ else:
+ connection = database.get_connection(
+ readonly=False, autocommit=True)
+ try:
+ with connection.cursor() as cursor:
+ for name in reset:
+ cursor.execute(*table.select(table.name, table.id,
+ table.timestamp,
+ where=table.name == name,
+ limit=1))
+ if cursor.fetchone():
+ # It would be better to insert only
+ cursor.execute(*table.update([table.timestamp],
+ [CurrentTimestamp()],
+ where=table.name == name))
+ else:
+ cursor.execute(*table.insert(
+ [table.timestamp, table.name],
+ [[CurrentTimestamp(), name]]))
+
+ cursor.execute(*table.select(
+ Max(table.timestamp),
where=table.name == name))
- else:
- cursor.execute(*table.insert(
- [table.timestamp, table.name],
- [[CurrentTimestamp(), name]]))
+ timestamp, = cursor.fetchone()
- cursor.execute(*table.select(
- Max(table.timestamp),
- where=table.name == name))
- timestamp, = cursor.fetchone()
+ cursor.execute(*table.select(
+ _cast(Max(table.timestamp)),
+ where=table.name == name))
+ timestamp, = cursor.fetchone()
- cursor.execute(*table.select(
- _cast(Max(table.timestamp)),
- where=table.name == name))
- timestamp, = cursor.fetchone()
-
- inst = cls._instances[name]
- inst._clear(dbname, timestamp)
+ inst = cls._instances[name]
+ inst._clear(dbname, timestamp)
+ connection.commit()
+ finally:
+ database.put_connection(connection)
reset.clear()
@classmethod
diff -r 075d94d578c4 -r 634f8347fb75 trytond/modules/__init__.py
--- a/trytond/modules/__init__.py Mon Jul 08 17:10:34 2019 +0200
+++ b/trytond/modules/__init__.py Mon Jul 08 21:24:33 2019 +0200
@@ -14,6 +14,7 @@
from sql.functions import CurrentTimestamp
import trytond.tools as tools
+from trytond.cache import Cache
from trytond.config import config
from trytond.exceptions import MissingDependenciesException
from trytond.transaction import Transaction
@@ -266,6 +267,8 @@
]))
module2state[module] = 'activated'
+ # Avoid clearing cache to prevent dead lock on ir.cache table
+ Cache.rollback(transaction)
transaction.commit()
if not update:
diff -r 075d94d578c4 -r 634f8347fb75 trytond/tests/test_cache.py
--- a/trytond/tests/test_cache.py Mon Jul 08 17:10:34 2019 +0200
+++ b/trytond/tests/test_cache.py Mon Jul 08 21:24:33 2019 +0200
@@ -104,6 +104,19 @@
self.wait_cache_sync()
self.assertEqual(cache.get('foo'), None)
+ def test_memory_cache_nested_transactions(self):
+ "Test MemoryCache with nested transactions"
+ # Create entry in the cache table to trigger 2 updates
+ with Transaction().start(DB_NAME, USER):
+ cache.clear()
+ # Ensure sync is performed on start
+ time.sleep(cache_mod._clear_timeout)
+
+ with Transaction().start(DB_NAME, USER) as transaction1:
+ cache.clear()
+ with transaction1.new_transaction():
+ cache.clear()
+
def test_memory_cache_sync(self):
"Test MemoryCache synchronisation"
with Transaction().start(DB_NAME, USER):