On my system Julia is also ~4x faster than Python, so it seems that this
is mainly a problem with the BLAS library. Did you build Julia
yourself or are you using binaries?
I see some improvements (20%) using an in-place sig function and some
other bits. Together with Pablo's trick this halves execution time:
# Julia 0.4-era setup: Base.LinAlg became the LinearAlgebra stdlib in Julia 1.0.
# disable_threaded_libs() presumably forces single-threaded BLAS so the benchmark
# is not skewed by BLAS threading — NOTE(review): this API no longer exists; on
# modern Julia use BLAS.set_num_threads(1) instead.
import Base.LinAlg, Base.LinAlg.BlasReal, Base.LinAlg.BlasComplex
Base.disable_threaded_libs()
"""
    sig!(out, x::Vector)

Compute the logistic sigmoid elementwise, writing into `out`:
`out[i] = 1 / (1 + exp(-x[i]))`.

`out` and `x` must share the same indices (`eachindex` throws a
`DimensionMismatch` otherwise). Returns `nothing`.
"""
function sig!(out, x::Vector)
    # eachindex(out, x) validates that the buffers are index-compatible,
    # which makes the @inbounds annotation provably safe.
    @inbounds for i in eachindex(out, x)
        # Scalar division: the original used `1.0 ./`, an elementwise operator
        # applied to a scalar — plain `/` is the correct idiom here.
        out[i] = 1.0 / (1.0 + exp(-x[i]))
    end
    return nothing
end
"""
    mf_loop(Ndt, V0, V, dt, W, J)

Benchmark kernel: integrate the mean-field recurrence
`V <- (1 - dt) * V + dt * J * sig(V) + W[:, step]` for `Ndt` steps,
starting from `V0`, without allocating inside the loop. Returns `nothing`
(all results are discarded — this exists only to time the loop).
"""
function mf_loop(Ndt::Int64, V0::Vector{Float64}, V::Vector{Float64},
                 dt::Float64, W::Matrix{Float64}, J::Matrix{Float64})
    # Working buffers copied from V0 so the caller's arrays are untouched.
    # NB: this rebinding shadows the `V` argument — the passed-in V is ignored.
    sv = copy(V0)
    V = copy(V0)
    for step = 1:Ndt
        sig!(sv, V)                        # sv <- sigmoid(V), in place
        # One fused BLAS call: V <- dt * J * sv + (1 - dt) * V, no temporaries.
        BLAS.gemv!('N', dt, J, sv, 1.0 - dt, V)
        # Add this step's noise column elementwise.
        for k = 1:length(V)
            V[k] += W[k, step]
        end
    end
    nothing
end
...
On Thu, 2016-01-21 at 17:10, Dupont <[email protected]> wrote:
> Hi,
>
> Thank you for your answer. It is a bit faster for me too but still 3x
> slower than numpy (note that you have to uncomment #sv = sig(V) in your
> code).
>
> Best regards,