I know there was some work last year on reducing the overhead of
creating cython.view.arrays (cvarray). However, I have noticed a large overhead
in converting a cvarray to a typed memoryview (e.g. double[:]). I would like
some advice on inspecting Cython's source code.

For example, where can I find
"__Pyx_PyObject_to_MemoryviewSlice_ds_double"?




Through testing, my guess is that this function introduces most of the
overhead:
"""

@timefunc("cvarray_to_memoryview")

def _(Py_ssize_t L):

cdef Py_ssize_t i

cdef double* arrptr

cdef cvarray cvarr

cdef double[:] arr

for i in range(loops):

arrptr = <double*> malloc(sizeof(double) * L)

cvarr = cvarray((L,),sizeof(double),'d', mode="c", allocate_buffer=False)

cvarr.callback_free_data = free

cvarr.data = <char *> arrptr

arr = cvarr # Only difference is this conversion


# Prevents dead code elimination

str(arr[0])

@timefunc("cvarray")

def _(Py_ssize_t L):

cdef Py_ssize_t i

cdef cvarray cvarr = cvarray((L,),sizeof(double),'d')


for i in range(loops):

arrptr = <double*> malloc(sizeof(double) * L)

cvarr = cvarray((L,),sizeof(double),'d', mode="c", allocate_buffer=False)

cvarr.callback_free_data = free

cvarr.data = <char *> arrptr


# Prevents dead code elimination

str(cvarr[0])

"""
Timings (loops = 100000; columns are L = 1, 10, 100, 1000, 10000; times in μs):

cvarray_to_memoryview   1.555391  1.510217  1.518601  1.503522  1.733853
cvarray                 0.342308  0.353832  0.335155  0.361952  0.518414
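
As a possible third case (only a sketch, not something I have timed; it reuses
the timefunc/loops setup above, and the "pointer_to_memoryview" label is just a
name for this sketch), Cython can also coerce a raw pointer straight to a typed
memoryview. I do not know whether that path goes through the same
__Pyx_PyObject_to_MemoryviewSlice helper, but it might be worth comparing:

"""
from libc.stdlib cimport malloc, free

# Sketch only: times the <double[:L]> pointer coercion instead of the
# cvarray -> memoryview assignment. A single buffer is reused so that
# ownership stays simple (the coerced memoryview does not own the data).
@timefunc("pointer_to_memoryview")
def _(Py_ssize_t L):
    cdef Py_ssize_t i
    cdef double* arrptr = <double*> malloc(sizeof(double) * L)
    cdef double[:] arr

    for i in range(loops):
        arr = <double[:L]> arrptr  # direct pointer-to-memoryview coercion

    # Prevents dead code elimination
    str(arr[0])

    free(arrptr)
"""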

Golden Rockefeller