Here we go:

I edited the original test-code that Niphlod posted, just enough to put my 
hypothesis to the test.
Here are the results:

C:\Python26\python.exe F:/Experimental/web2py_272/web2py/test_executesql.py
Times are for 100 iterations
Preparing....
started bench
speed_executesql() total=0.920402 avg=0.009204
speed_normal_rows() total=20.190159 avg=0.201902
speed_rows_with_cacheable() total=18.954632 avg=0.189546
speed_executesql_with_colnames() total=1.070893 avg=0.010709
speed_rows_with_simple_processor() total=1.118015 avg=0.011180


Process finished with exit code 0


I kept the addition of 'colname or [...' in the 'if as_dict:' block of 
executesql() in dal.py, so the colnames that were passed in were actually 
used - otherwise, as in Niphlod's original test code, they would have been 
disregarded (since 'as_dict=True' is passed in).
So 'speed_executesql_with_colnames()' is actually doing what it should 
in this test run.

Let's see:
speed_executesql_with_colnames is about 20x faster than 
speed_normal_rows in this run...

You can test it out yourself - here is the code:

-- 
Resources:
- http://web2py.com
- http://web2py.com/book (Documentation)
- http://github.com/web2py/web2py (Source code)
- https://code.google.com/p/web2py/issues/list (Report Issues)
--- 
You received this message because you are subscribed to the Google Groups 
"web2py-users" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.
import timeit
from gluon.dal import DAL, Field

# Number of timeit iterations for every benchmark scenario.
times = 100
print("Times are for %i iterations" % times)

## prepare a large dataset
print('Preparing....')

db = DAL('sqlite:memory')
fieldNames = [('f_' + str(i)) for i in range(10)]
# load 1k records into a test table, plus a second table referencing it
db.define_table('test_table', *[Field(fieldName) for fieldName in fieldNames])
db.define_table('test_table2', Field('f_test_table', db.test_table), *[Field(fieldName) for fieldName in fieldNames])
db.commit()
# start from empty tables so repeated runs benchmark the same row count
db(db.test_table.id > 0).delete()
db(db.test_table2.id > 0).delete()
db.commit()
for a in range(1000):
    # every field gets the same dummy value
    values = dict.fromkeys(fieldNames, 'testValue')
    db.test_table.insert(**values)
    # test_table2 additionally carries a reference column into test_table
    values['f_test_table'] = a
    db.test_table2.insert(**values)
db.commit()

# Join query used by every scenario, plus its raw SQL for executesql().
query = (db.test_table2.f_test_table == db.test_table.id)
sql = db(query)._select(db.test_table.ALL, db.test_table2.ALL)
# NOTE(review): fragile parse - recovers the column list from the generated
# "SELECT col1, col2, ... FROM" text; assumes no column name contains ', '.
sql_colnames = sql.split(' FROM')[0][7:].split(', ')

def bench(cmd, imprt):
    """Time `cmd` for `times` iterations via timeit; print and return total seconds.

    `imprt` is the setup statement handed to timeit.Timer (typically a
    'from __main__ import ...' line bringing the benchmarked callable in scope).
    """
    timer = timeit.Timer(cmd, imprt)
    total = timer.timeit(number=times)
    print("%s total=%02f avg=%02f" % (cmd, total, total / times))
    return total

def myprocessor(rows, fields, colnames, blob_decode=True, cacheable = False):
    """Minimal select() processor: map each raw row tuple to a colname->value dict.

    `fields`, `blob_decode` and `cacheable` are accepted only to satisfy the
    processor signature expected by DAL; they are ignored here.
    """
    result = []
    for record in rows:
        result.append(dict(zip(colnames, record)))
    return result

def speed_executesql():
    """Benchmark body: raw executesql() returning dicts, no explicit colnames."""
    # Result intentionally discarded; the unused `rtn` local was removed.
    db.executesql(sql, as_dict=True)

def speed_executesql_with_colnames():
    """Benchmark body: executesql() returning dicts keyed by the parsed colnames."""
    # Result intentionally discarded; the unused `rtn` local was removed.
    db.executesql(sql, colnames=sql_colnames, as_dict=True)

def speed_normal_rows():
    """Benchmark body: the standard DAL select() building full Rows objects."""
    # Result intentionally discarded; the unused `rtn` local was removed.
    db(query).select()

def speed_rows_with_cacheable():
    """Benchmark body: DAL select() with cacheable=True (lighter Row objects)."""
    # Result intentionally discarded; the unused `rtn` local was removed.
    db(query).select(cacheable=True)

def speed_rows_with_simple_processor():
    """Benchmark body: DAL select() with the minimal dict-building processor."""
    # Result intentionally discarded; the unused `rtn` local was removed.
    db(query).select(processor=myprocessor)

def full_bench():
    """Run every benchmark scenario once, `times` iterations each.

    Each bench() call prints its own total/avg line; the returned totals
    were previously bound to unused locals (b1..b5) and are now discarded.
    """
    # print() call for consistency with the rest of the file (the original
    # used a Python-2-only print statement here).
    print('started bench')
    bench('speed_executesql()', 'from __main__ import speed_executesql')
    bench('speed_normal_rows()', 'from __main__ import speed_normal_rows')
    bench('speed_rows_with_cacheable()', 'from __main__ import speed_rows_with_cacheable')
    bench('speed_executesql_with_colnames()', 'from __main__ import speed_executesql_with_colnames')
    bench('speed_rows_with_simple_processor()', 'from __main__ import speed_rows_with_simple_processor, myprocessor')

if __name__ == '__main__':
    full_bench()

Reply via email to