Hi,
I want to implement Euler–Maruyama for a neural network. My NumPy implementation
seems about 4x faster than my Julia code (100 ms vs 430 ms).
Can anyone give me a hint about how I can speed things up please?
Thank you for your help,
Best regards.
## Python code
import numpy as np
import time
def sig(x):
    """Logistic sigmoid 1/(1 + exp(-x)), applied elementwise to arrays."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
def mf_loop(Ndt, V0, V, dt, W, J):
    """Euler-Maruyama integration of dV = (-V + J*sigmoid(V)) dt + sqrt(dt) dW.

    Ndt: number of timesteps.
    V0:  initial state, shape (n,).
    V:   unused; kept only for call compatibility with existing callers.
    dt:  timestep.
    W:   pre-drawn noise, shape (n, Ndt); column i is the increment for step i.
    J:   coupling matrix, shape (n, n).

    Returns the final state vector (the original returned None, which made the
    computation unobservable; returning V is backward compatible).
    """
    V = np.copy(V0)          # local state; the caller's arrays are untouched
    sqrt_dt = np.sqrt(dt)    # hoisted: loop-invariant, was recomputed each step
    for i in range(Ndt):
        sv = 1.0 / (1.0 + np.exp(-V))   # logistic sigmoid of the current state
        V = V + (-V + np.dot(J, sv)) * dt + sqrt_dt * W[:, i]
    return V
# --- driver script ---
N = 100        # network size
# timestep Euler Maruyama
dt = 0.1
Ndt = 10000    # number of timesteps
sigma = 0.1    # noise amplitude
W = np.random.randn(N, Ndt) * sigma   # pre-drawn noise, one column per step
J = np.random.rand(N, N) / N          # coupling matrix, scaled by 1/N
# initial condition
V0 = np.random.rand(N)
# result vector
V = np.copy(V0)
# print() form runs on both Python 2 and 3 (bare `print ...` is a
# SyntaxError on Python 3).
print("calcul\n")
tic = time.time()
mf_loop(Ndt, V0, V, dt, W, J)
toc = time.time()
# Original label said "dt", but this is the wall-clock elapsed time.
print(" -> elapsed = %s s" % (toc - tic))
## Julia code
import Base.LinAlg, Base.LinAlg.BlasReal, Base.LinAlg.BlasComplex
"""
    sig(x)

Logistic sigmoid `1/(1 + exp(-x))`, applied elementwise.

Uses broadcast (dot) operators throughout: in Julia >= 0.7, `exp(-x)` and
`1 + v` on a `Vector` are errors, so the original non-dotted version only
worked for scalars on modern Julia.
"""
function sig(x::Union{Float64,Vector})
    return 1.0 ./ (1.0 .+ exp.(-x))
end
"""
    mf_loop(Ndt, V0, V, dt, W, J)

Euler-Maruyama integration of `dV = (-V + J*sigmoid(V)) dt + dW` for `Ndt`
steps, reading the (pre-scaled by `sqrt(dt)`) noise increments from the
columns of `W`. Returns the final state vector (the original returned
`nothing`; returning the state is backward compatible).

Performance: the original allocated several temporaries per step
(`sig(V)`, `J*sv`, the `W[:,i]` column copy, and the full RHS). Here the
sigmoid and the state update are fused in-place broadcasts and the noise
column is a view, leaving only the `J*sv` mat-vec allocation per step.
"""
function mf_loop(Ndt::Int64, V0::Vector{Float64}, V::Vector{Float64},
                 dt::Float64, W::Matrix{Float64}, J::Matrix{Float64})
    v = copy(V0)        # local state; caller's V0 and V are left untouched
    sv = similar(v)     # reusable buffer for the sigmoid
    for i in 1:Ndt
        sv .= 1.0 ./ (1.0 .+ exp.(-v))   # fused, allocation-free sigmoid
        drift = J * sv                   # BLAS gemv; one small alloc per step
        # Fused in-place update: v += (-v + J*sv)*dt + W[:,i] (column as view)
        v .+= (drift .- v) .* dt .+ @view(W[:, i])
    end
    return v
end
# --- driver script ---
N = 100                      # network size
dt = 0.1                     # Euler-Maruyama timestep
Ndt = 10000                  # number of timesteps
sigma = 0.1                  # noise amplitude
# Noise increments pre-scaled by sqrt(dt) (the Python version instead
# multiplies by sqrt(dt) inside the loop).
W = randn(N, Ndt) * sigma * sqrt(dt)
J = rand(N, N) / N           # coupling matrix, scaled by 1/N
V0 = rand(N)                 # initial condition
V = copy(V0)                 # result vector
println("calcul\n")
# Warm-up call so @time measures execution, not JIT compilation.
mf_loop(1, V0, V, dt, W, J)
@time mf_loop(Ndt, V0, V, dt, W, J)