I have recently switched from programming heavily in MATLAB to programming in
Python, and I am running into some issues with the Python code I have written.
I am using IPython with Anaconda2 on Windows 7, and I am using NumPy and SciPy
to integrate a system of ordinary differential equations. I have generalized
the system of ODEs to work for any number N of equations.

I have two versions of the code that do the same thing and give the same error
message. One version uses 'exec' and 'eval' heavily, and the other uses arrays
heavily. Here is my code for the array version:
----------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint

#Constants and parameters
N = 2
K00 = np.logspace(0,3,101,10)
len1 = len(K00)
epsilon = 0.01
y0 = [0]*(3*N/2+3)
u1 = 0
u2 = 0
u3 = 0
Kplot = np.zeros((len1,1))
Pplot = np.zeros((len1,1))
S = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
KS = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
PS = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
Splot = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
KSplot = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
PSplot = [np.zeros((len1,1)) for kkkk in range(N/2+1)]

for series in range(0,len1):
    K0 = K00[series]
    Q = 10
    r1 = 0.0001
    r2 = 0.001
    a = 0.001
    d = 0.001
    k = 0.999
    S10 = 1e5
    P0 = 1
    tfvec = np.tile(1e10,(1,5))
    tf = tfvec[0,0]
    time = np.linspace(0,tf,len1)

    #Defining dy/dt's
    def f(y,t):
        for alpha in range(0,(N/2+1)):
            S[alpha] = y[alpha]
        for beta in range((N/2)+1,N+1):
            KS[beta-N/2-1] = y[beta]
        for gamma in range(N+1,3*N/2+1):
            PS[gamma-N] = y[gamma]
        K = y[3*N/2+1]
        P = y[3*N/2+2]

        # The model equations
        ydot = np.zeros((3*N/2+3,1))
        B = range((N/2)+1,N+1)
        G = range(N+1,3*N/2+1)
        runsumPS = 0
        runsum1 = 0
        runsumKS = 0
        runsum2 = 0
        for m in range(0,N/2):
            runsumPS = runsumPS + PS[m+1]
            runsum1 = runsum1 + S[m+1]
            runsumKS = runsumKS + KS[m]
            runsum2 = runsum2 + S[m]
            ydot[B[m]] = a*K*S[m]-(d+k+r1)*KS[m]
        for i in range(0,N/2-1):
            ydot[G[i]] = a*P*S[i+1]-(d+k+r1)*PS[i+1]
        for p in range(1,N/2):
            ydot[p] = -S[p]*(r1+a*K+a*P)+k*KS[p-1]+ \
                      d*(PS[p]+KS[p])
        ydot[0] = Q-(r1+a*K)*S[0]+d*KS[0]+k*runsumPS
        ydot[N/2] = k*KS[N/2-1]-(r2+a*P)*S[N/2]+ \
                    d*PS[N/2]
        ydot[G[N/2-1]] = a*P*S[N/2]-(d+k+r2)*PS[N/2]
        ydot[3*N/2+1] = (d+k+r1)*runsumKS-a*K*runsum2
        ydot[3*N/2+2] = (d+k+r1)*(runsumPS-PS[N/2])- \
                        a*P*runsum1+(d+k+r2)*PS[N/2]
        for j in range(0,3*N/2+3):
            return ydot[j]

    # Initial conditions
    y0[0] = S10
    for i in range(1,3*N/2+1):
        y0[i] = 0
    y0[3*N/2+1] = K0
    y0[3*N/2+2] = P0

    # Solve the DEs
    soln = odeint(f,y0,time,mxstep = 5000)
    for alpha in range(0,(N/2+1)):
        S[alpha] = soln[:,alpha]
    for beta in range((N/2)+1,N+1):
        KS[beta-N/2-1] = soln[:,beta]
    for gamma in range(N+1,3*N/2+1):
        PS[gamma-N] = soln[:,gamma]

    for alpha in range(0,(N/2+1)):
        Splot[alpha][series] = soln[len1-1,alpha]
    for beta in range((N/2)+1,N+1):
        KSplot[beta-N/2-1][series] = soln[len1-1,beta]
    for gamma in range(N+1,3*N/2+1):
        PSplot[gamma-N][series] = soln[len1-1,gamma]

    for alpha in range(0,(N/2+1)):
        u1 = u1 + Splot[alpha]
    for beta in range((N/2)+1,N+1):
        u2 = u2 + KSplot[beta-N/2-1]
    for gamma in range(N+1,3*N/2+1):
        u3 = u3 + PSplot[gamma-N]

    K = soln[:,3*N/2+1]
    P = soln[:,3*N/2+2]
    Kplot[series] = soln[len1-1,3*N/2+1]
    Pplot[series] = soln[len1-1,3*N/2+2]

utot = u1+u2+u3

#Plot
plt.plot(np.log10(K00),utot[:,0])
plt.show()
----------------------------------------------------------------------
ERROR MESSAGE:
RuntimeError: The size of the array returned by func (1) does not match the
size of y0 (6).
----------------------------------------------------------------------
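With N = 2, y0 has 3*N/2+3 = 6 entries, which matches the "6" in the error
message, so the complaint seems to be about what my function f returns rather
than about y0 itself. My understanding from the SciPy documentation is that
the function passed to odeint should take (y, t) and return a flat sequence
of derivatives with the same length as y0. A toy two-equation example like
the one below (completely made up, not my model) runs without any error for
me, so I suspect the problem is in how f builds or returns ydot, but I cannot
see where:
----------------------------------------------------------------------
import numpy as np
from scipy.integrate import odeint

# Toy system just to check the odeint calling convention:
# dy0/dt = -y0, dy1/dt = -2*y1
def g(y, t):
    dydt = np.zeros(2)       # 1-D array, same length as y0
    dydt[0] = -y[0]
    dydt[1] = -2.0*y[1]
    return dydt              # return the whole derivative array at once

y0_test = [1.0, 1.0]
t_test = np.linspace(0, 5, 50)
sol = odeint(g, y0_test, t_test)
print(sol[-1, :])            # final values of both variables
----------------------------------------------------------------------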
The RuntimeError is raised at the odeint call. Can anyone help me figure out
what is going wrong? Thanks in advance.