Vo Minh Thu (OpenERP) has proposed merging
lp:~openerp-dev/openobject-server/trunk-simple-table-stats-vmt into
lp:openobject-server.
Requested reviews:
OpenERP Core Team (openerp)
For more details, see:
https://code.launchpad.net/~openerp-dev/openobject-server/trunk-simple-table-stats-vmt/+merge/122685
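
This branch adds a --query-count-file option: when it is set, openerp.sql_db
collects per-table SQL statistics (number of queries and elapsed time, split
between FROM and INTO clauses) and dumps them as CSV to the given file once
the registry has been loaded. A possible invocation (the database name and
the updated module are only examples):

  ./openerp-server --query-count-file=/tmp/query_stats.csv -d test_db -u base --stop-after-init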
--
https://code.launchpad.net/~openerp-dev/openobject-server/trunk-simple-table-stats-vmt/+merge/122685
Your team OpenERP R&D Team is subscribed to branch
lp:~openerp-dev/openobject-server/trunk-simple-table-stats-vmt.
=== modified file 'openerp/modules/registry.py'
--- openerp/modules/registry.py 2012-08-13 15:05:01 +0000
+++ openerp/modules/registry.py 2012-09-04 13:55:25 +0000
@@ -182,6 +182,8 @@
try:
registry.do_parent_store(cr)
registry.get('ir.actions.report.xml').register_all(cr)
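+ # Dump the SQL statistics gathered while loading the modules
+ # (second argument False: omit the CSV header row).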
+ if openerp.tools.config['query_count_file']:
+ openerp.sql_db.write_query_count(openerp.tools.config['query_count_file'], False)
cr.commit()
finally:
cr.close()
=== modified file 'openerp/sql_db.py'
--- openerp/sql_db.py 2012-02-06 20:31:51 +0000
+++ openerp/sql_db.py 2012-09-04 13:55:25 +0000
@@ -35,13 +35,21 @@
__all__ = ['db_connect', 'close_db']
+from datetime import datetime as mdt
+from datetime import timedelta
from functools import wraps
+from inspect import currentframe
import logging
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
from psycopg2.psycopg1 import cursor as psycopg1cursor
-from threading import currentThread
+import re
+import threading
+
+import tools
+from tools import config
+from tools.func import frame_codeinfo
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
@@ -65,19 +73,58 @@
psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x))
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))
-
-import tools
-from tools.func import frame_codeinfo
-from datetime import datetime as mdt
-from datetime import timedelta
-import threading
-from inspect import currentframe
-
-import re
-re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$');
-re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$');
-
-sql_counter = 0
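+# Naive pattern extracting the table name following a FROM or INTO keyword.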
+re_query = re.compile('.* (from|into) "?([a-zA-Z_0-9]+)"? .*$')
+
+query_count_lock = threading.RLock()
+query_count = 0
+query_counts = {}
+
+def count_query(query, delay):
+ """
+ Collect a few statistics about database usage. They can later be written
+ to a file with the `write_query_count` function.
+
+ :param string query: the query being observed.
+ :param float delay: time spent running the query, in microseconds.
+ :returns: nothing
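+
+ A minimal, made-up example (table name and timing are illustrative)::
+
+ >>> count_query('SELECT name FROM "res_users" WHERE id = 1', 1500.0)
+ >>> query_counts['res_users']['from']
+ (1, 1500.0)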
+ """
+ global query_count_lock
+ global query_count
+ global query_counts
+ with query_count_lock:
+ query_count += 1
+ m = re_query.match(query.lower())
+ if m:
+ typ = m.group(1)
+ table = m.group(2)
+ table_counts = query_counts.setdefault(table, {'from': (0, 0), 'into': (0, 0)})
+ calls, elapsed_time = table_counts[typ]
+ table_counts[typ] = (calls + 1, elapsed_time + delay)
+
+def write_query_count(file_name, header):
+ """
+ Write a few statistics about database table usage as CSV. The statistics
+ are collected using the `count_query` function.
+
+ Assuming the statistics were saved to a file named `here.txt`, a possible
+ command to display them is:
+ > cat here.txt | column -s, -t | sort -n -k 2
+
+ :param string file_name: name of the file the data is written to.
+ :param bool header: whether a header row is written before the data.
+ :returns: nothing
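+
+ A data row might look like this (illustrative values; raw microseconds,
+ then the same delay formatted, then the number of calls):
+
+ res_users,1500.0,0:00:00.001500,1,0,0:00:00,0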
+ """
+ with query_count_lock:
+ with open(file_name, 'w') as w:
+ if header:
+ w.write("Table,Elapsed time (FROM),Elapsed time (FROM),Calls (FROM),Elapsed time (INTO),Elapsed time (INTO),Calls (INTO)\n")
+ for table, counts in query_counts.iteritems():
+ count_from, delay_from = counts['from']
+ delay_from_ = timedelta(microseconds=delay_from)
+ count_into, delay_into = counts['into']
+ delay_into_ = timedelta(microseconds=delay_into)
+ w.write("%s,%s,%s,%s,%s,%s,%s\n" % (table, delay_from, delay_from_, count_from, delay_into, delay_into_, count_into))
+
class Cursor(object):
"""Represents an open transaction to the PostgreSQL DB backend,
@@ -141,26 +188,18 @@
"""
IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit
- def check(f):
- @wraps(f)
- def wrapper(self, *args, **kwargs):
- if self.__closed:
- msg = 'Unable to use a closed cursor.'
- if self.__closer:
- msg += ' It was closed at %s, line %s' % self.__closer
- raise psycopg2.OperationalError(msg)
- return f(self, *args, **kwargs)
- return wrapper
+ def check(self):
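+ """Raise an OperationalError if this cursor has already been closed."""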
+ if self.__closed:
+ msg = 'Unable to use a closed cursor.'
+ if self.__closer:
+ msg += ' It was closed at %s, line %s' % self.__closer
+ raise psycopg2.OperationalError(msg)
def __init__(self, pool, dbname, serialized=True):
- self.sql_from_log = {}
- self.sql_into_log = {}
-
# default log level determined at cursor creation, could be
# overridden later for debugging purposes
self.sql_log = _logger.isEnabledFor(logging.DEBUG)
- self.sql_log_count = 0
self.__closed = True # avoid the call of close() (by __del__) if an exception
# is raised by any of the following initialisations
self._pool = pool
@@ -197,14 +236,14 @@
_logger.warning(msg)
self._close(True)
- @check
def execute(self, query, params=None, log_exceptions=None):
+ self.check()
if '%d' in query or '%f' in query:
_logger.warning(query)
_logger.warning("SQL queries cannot contain %d or %f anymore. "
"Use only %s")
- if self.sql_log:
+ if self.sql_log or config['query_count_file']:
now = mdt.now()
try:
@@ -219,22 +258,12 @@
_logger.exception("bad query: %s", self._obj.query or query)
raise
- if self.sql_log:
+ if self.sql_log or config['query_count_file']:
+ _logger.debug("query: %s", self._obj.query)
delay = mdt.now() - now
delay = delay.seconds * 1E6 + delay.microseconds
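+ # delay is a float number of microseconds; count_query aggregates it per table.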
+ count_query(query, delay)
- _logger.debug("query: %s", self._obj.query)
- self.sql_log_count+=1
- res_from = re_from.match(query.lower())
- if res_from:
- self.sql_from_log.setdefault(res_from.group(1), [0, 0])
- self.sql_from_log[res_from.group(1)][0] += 1
- self.sql_from_log[res_from.group(1)][1] += delay
- res_into = re_into.match(query.lower())
- if res_into:
- self.sql_into_log.setdefault(res_into.group(1), [0, 0])
- self.sql_into_log[res_into.group(1)][0] += 1
- self.sql_into_log[res_into.group(1)][1] += delay
return res
@@ -243,36 +272,8 @@
safe for IN conditions, after uniquifying them."""
return tools.misc.split_every(self.IN_MAX, set(ids))
- def print_log(self):
- global sql_counter
- sql_counter += self.sql_log_count
- if not self.sql_log:
- return
- def process(type):
- sqllogs = {'from':self.sql_from_log, 'into':self.sql_into_log}
- sum = 0
- if sqllogs[type]:
- sqllogitems = sqllogs[type].items()
- sqllogitems.sort(key=lambda k: k[1][1])
- _logger.debug("SQL LOG %s:", type)
- sqllogitems.sort(lambda x,y: cmp(x[1][0], y[1][0]))
- for r in sqllogitems:
- delay = timedelta(microseconds=r[1][1])
- _logger.debug("table: %s: %s/%s",
- r[0], delay, r[1][0])
- sum+= r[1][1]
- sqllogs[type].clear()
- sum = timedelta(microseconds=sum)
- _logger.debug("SUM %s:%s/%d [%d]",
- type, sum, self.sql_log_count, sql_counter)
- sqllogs[type].clear()
- process('from')
- process('into')
- self.sql_log_count = 0
- self.sql_log = False
-
- @check
def close(self):
+ self.check()
return self._close(False)
def _close(self, leak=False):
@@ -281,7 +282,6 @@
if self.sql_log:
self.__closer = frame_codeinfo(currentframe(),3)
- self.print_log()
self._obj.close()
@@ -299,13 +299,13 @@
if leak:
self._cnx.leaked = True
else:
- chosen_template = tools.config['db_template']
+ chosen_template = config['db_template']
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
keep_in_pool = self.dbname not in templates_list
self._pool.give_back(self._cnx, keep_in_pool=keep_in_pool)
- @check
def autocommit(self, on):
+ self.check()
if on:
isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
else:
@@ -324,20 +324,20 @@
else ISOLATION_LEVEL_READ_COMMITTED
self._cnx.set_isolation_level(isolation_level)
- @check
def commit(self):
""" Perform an SQL `COMMIT`
"""
+ self.check()
return self._cnx.commit()
- @check
def rollback(self):
""" Perform an SQL `ROLLBACK`
"""
+ self.check()
return self._cnx.rollback()
- @check
def __getattr__(self, name):
+ self.check()
return getattr(self._obj, name)
""" Set the mode of postgres operations for all cursors
@@ -480,7 +480,7 @@
def dsn(db_name):
_dsn = ''
for p in ('host', 'port', 'user', 'password'):
- cfg = tools.config['db_' + p]
+ cfg = config['db_' + p]
if cfg:
_dsn += '%s=%s ' % (p, cfg)
@@ -499,8 +499,8 @@
def db_connect(db_name):
global _Pool
if _Pool is None:
- _Pool = ConnectionPool(int(tools.config['db_maxconn']))
- currentThread().dbname = db_name
+ _Pool = ConnectionPool(int(config['db_maxconn']))
+ threading.currentThread().dbname = db_name
return Connection(_Pool, db_name)
def close_db(db_name):
@@ -508,7 +508,7 @@
""" You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function."""
if _Pool:
_Pool.close_all(dsn(db_name))
- ct = currentThread()
+ ct = threading.currentThread()
if hasattr(ct, 'dbname'):
delattr(ct, 'dbname')
=== modified file 'openerp/tools/config.py'
--- openerp/tools/config.py 2012-03-02 11:28:34 +0000
+++ openerp/tools/config.py 2012-09-04 13:55:25 +0000
@@ -74,7 +74,8 @@
# Not exposed in the configuration file.
self.blacklist_for_save = set(
['publisher_warranty_url', 'load_language', 'root_path',
- 'init', 'save', 'config', 'update', 'stop_after_init'])
+ 'init', 'save', 'config', 'update', 'stop_after_init',
+ 'query_count_file'])
# dictionary mapping option destination (keys in self.options) to MyOptions.
self.casts = {}
@@ -191,6 +192,9 @@
group.add_option('--log-level', dest='log_level', type='choice', choices=levels,
my_default='info', help='specify the level of the logging. Accepted values: ' + str(levels) + ' (deprecated option).')
+ group.add_option("--query-count-file", dest="query_count_file", my_default=False,
+ help="Specify if some SQL query statistics are saved in a given file.")
+
parser.add_option_group(group)
# SMTP Group
@@ -375,7 +379,8 @@
'netrpc', 'xmlrpc', 'syslog', 'without_demo', 'timezone',
'xmlrpcs_interface', 'xmlrpcs_port', 'xmlrpcs',
'static_http_enable', 'static_http_document_root', 'static_http_url_prefix',
- 'secure_cert_file', 'secure_pkey_file', 'dbfilter', 'log_handler', 'log_level'
+ 'secure_cert_file', 'secure_pkey_file', 'dbfilter', 'log_handler', 'log_level',
+ 'query_count_file'
]
for arg in keys: