Complex ODE (ODEintWarning: Excess work done on this call)

I am trying to numerically solve a rather complex system of ordinary differential equations. odeint fails with ODEintWarning: Excess work done on this call, which I guess means the integration requires excessive computational work. Any ideas on alternatives?
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import scipy
from scipy.integrate import odeint
# declare parameters:
k1 = 2
k2 = 1.8E6
k3 = 48
k3r = 2.8e8
k4 = 1.1e6
k5 = 3
k6 = 6.6e9
k6r = 9.4
k7 = 40
k8 = 7.1
k9 = 0.25
k10 = 0.053
H = 1.28
A = 0.42 #BrO3-
C = 0.0012 #CH2(COOH)2
#define tspan and I.C.
t = np.linspace(0,50,num=5000, endpoint=False)
J0=[0,0.11,0.0012,0,0,0,0.19,0]
def adv_oregonator(j, t):
    # right-hand side of the 8-species system; j holds the concentrations
    dxdt = k1*A*j[1]*H**2 - k2*H*j[1]*j[0] - k3*H*A*j[0] + k3r*j[5]**2 + k4*H*j[5]*(C-j[2]) - 2*k5*j[0]**2
    dydt = -k1*A*j[1]*H**2 - k2*H*j[1]*j[0] - k6*j[3]*j[1]*H + k7*j[4]*j[6] + k9*j[7]*j[2]
    dzdt = k4*H*j[5]*(C-j[2]) - k9*j[7]*j[2] - k10*j[2]*j[6]
    dpdt = k1*A*j[1]*H**2 + 2*k2*H*j[1]*j[0] + k5*j[0]**2 - k6*j[3]*j[1]*H - k8*j[3]*j[6]
    dudt = k6*j[3]*j[1]*H - k6r*j[4] - k7*j[4]*j[6]
    dwdt = 2*k2*H*A*j[0] - 2*k3r*j[5]**2 - k4*H*j[5]*(C-j[2])
    dmdt = -k7*j[4]*j[6] - k8*j[3]*j[6] - k10*j[2]*j[6]
    dbdt = k7*j[4]*j[6] + k8*j[3]*j[6] - k9*j[2]*j[7]
    djdt = [dxdt, dydt, dzdt, dpdt, dudt, dwdt, dmdt, dbdt]
    return djdt
j = odeint(adv_oregonator,J0,t)
This is the original system.
And these are the weird plots that I get when I run the script: [plots not shown]
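The rate constants above span roughly eleven orders of magnitude, so the system is stiff; a stiff implicit method is a natural alternative to odeint's default here. A minimal sketch using scipy.integrate.solve_ivp with the Radau method, reusing adv_oregonator, t, and J0 from above:
from scipy.integrate import solve_ivp

# solve_ivp expects f(t, y); adv_oregonator is written as f(y, t) for odeint
sol = solve_ivp(lambda tt, jj: adv_oregonator(jj, tt),
                (t[0], t[-1]), J0, method='Radau', t_eval=t)
j = sol.y.T  # shape (len(t), 8), same column order as the odeint result
Raising odeint's internal step limit (odeint(..., mxstep=5000)) can also silence the warning, but a stiff solver usually addresses the root cause.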

Related

GEKKO for minimum time solution optimal control problem

This is a standard benchmark problem for minimum flight time.
I am trying to solve it in GEKKO, but it converges to neither a local nor a global minimum; here is the code. I followed the setup from the Jennings problem, but still, if anybody can help, that would be very nice.
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
import math
m = GEKKO()
nt = 501
tm = np.linspace(0,1,nt)
m.time = tm
x1=m.Var(value=-2.5)
x2=m.Var(value=0)
u=m.MV(value=1,lb=0,ub=2*math.pi)
p = np.zeros(nt)
p[-1] = 1.0
final = m.Param(value=p)
tf = m.FV(value=0,lb=0.1,ub=100.0)
tf.STATUS = 1
if x2.value>1:
    m.Equation(x1.dt()==((1+(x2-1)**2)*m.cos(u)*tf))
    m.Equation(x2.dt()==((1+(x2-1)**2)*m.sin(u)*tf))
else:
    m.Equation(x1.dt()==(m.cos(u)*tf))
    m.Equation(x2.dt()==(m.sin(u)*tf))
#m.Equation(x1*final<=3)
#m.Equation(x2*final<=0)
m.Minimize(tf)
m.options.IMODE = 6
m.solve()
tm = tm * tf.value[0]
plt.figure(1)
plt.plot(tm,x1.value,'k-',lw=2,label=r'$x_1$')
plt.plot(tm,x2.value,'b-',lw=2,label=r'$x_2$')
plt.plot(tm,u.value,'r--',lw=2,label=r'$u$')
plt.legend(loc='best')
plt.xlabel('Time')
plt.ylabel('Value')
plt.show()
Use the m.if3() function for the conditional statement. Here is the local solution discussed on p. 332 of the Cristiani and Martinon publication.
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
import math
m = GEKKO()
nt = 101; pi = math.pi
tm = np.linspace(0,1,nt); m.time = tm
x1=m.Var(value=-2.5,lb=-100,ub=100)
x2=m.Var(value=0,lb=-100,ub=100)
u=m.MV(value=0,lb=-pi,ub=pi); u.STATUS=1; u.DCOST=0.1
p = np.zeros(nt); p[-1] = 1.0
final = m.Param(value=p)
tf = m.FV(value=10,lb=0.1,ub=100.0); tf.STATUS = 1
c = m.if3(x2-1,1,(x2-1)**2+1)
m.Equation(x1.dt()==c*m.cos(u)*tf)
m.Equation(x2.dt()==c*m.sin(u)*tf)
# hard constraints (fix endpoint)
#m.fix_final(x1,3)
#m.fix_final(x2,0)
# soft constraints (objective)
m.Minimize(100*final*(x1-3)**2)
m.Minimize(100*final*(x2-0)**2)
# minimize final time
# initialize with IPOPT Solver
m.Minimize(tf)
m.options.IMODE = 6
m.options.SOLVER=3
m.solve()
# find MINLP solution with APOPT Solver
m.options.SOLVER=1
m.options.TIME_SHIFT=0
m.solve()
tm = tm * tf.value[0]
plt.figure(figsize=(8,5))
plt.plot(tm,x1.value,'k-',lw=2,label=r'$x_1$')
plt.plot(tm,x2.value,'b-',lw=2,label=r'$x_2$')
plt.plot(tm,u.value,'r--',lw=2,label=r'$u$')
plt.legend(loc='best'); plt.grid()
plt.xlabel('Time'); plt.ylabel('Value')
plt.savefig('results.png',dpi=300); plt.show()
The global solution is shown in the paper.
The solvers in Gekko (APOPT, BPOPT, IPOPT) are local solvers. You need to add constraints or use different initial guess values to find the global optimum.
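For the "different initial guess values" route, here is a minimal multi-start sketch that rebuilds the model above for several starting guesses of the control and keeps the best final time (the solve_from helper and the guess list are illustrative, not part of the original answer):
from gekko import GEKKO
import numpy as np
import math

def solve_from(u0):
    # rebuild the model above with a different initial guess for the control
    m = GEKKO(remote=False)
    nt = 101; pi = math.pi
    m.time = np.linspace(0,1,nt)
    x1 = m.Var(value=-2.5,lb=-100,ub=100)
    x2 = m.Var(value=0,lb=-100,ub=100)
    u = m.MV(value=u0,lb=-pi,ub=pi); u.STATUS=1; u.DCOST=0.1
    p = np.zeros(nt); p[-1] = 1.0
    final = m.Param(value=p)
    tf = m.FV(value=10,lb=0.1,ub=100.0); tf.STATUS=1
    c = m.if3(x2-1,1,(x2-1)**2+1)
    m.Equation(x1.dt()==c*m.cos(u)*tf)
    m.Equation(x2.dt()==c*m.sin(u)*tf)
    m.Minimize(100*final*(x1-3)**2)
    m.Minimize(100*final*(x2-0)**2)
    m.Minimize(tf)
    m.options.IMODE = 6
    try:
        m.solve(disp=False)
        return tf.value[0]
    except Exception:
        return float('inf')  # failed solve
# keep the best of several starting guesses for the control
results = {u0: solve_from(u0) for u0 in [-1.0, 0.0, 1.0]}
print(results)
print('best starting guess:', min(results, key=results.get))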

Regression with constraints on contribution from variables

I'm trying to develop a regression model with constraints on the effect of the independent variables. My model equation is y = a0 + a1*x1 + a2*x2 with 200 data points. What I want to achieve is that sum(a1*x1) over the 200 data points falls in a certain range, i.e. lb1 < sum(a1*x1) < ub1. I am using GEKKO for the optimization part and got stuck while applying this condition.
I am using the following code where ubdict is the dictionary for the boundaries:
m = gk.GEKKO(remote=False)
m.options.IMODE=2 #Regression mode
y = np.array(df['y']) #dependant vars for optimization
x = np.array(df[X]) #array of independent vars for optimization
n = x.shape[1] #number of variables
c = m.Array(m.FV, n+1) #array of parameters and intercept
for ci in c:
    ci.STATUS = 1  # calculate fixed parameter
xp = [None]*n
#load data
xd = m.Array(m.Param, n)
yd = m.Param(value=y)
for i in range(n):
    xd[i].value = x[:, i]
    if ubdict[i] >= 0:
        xp[i] = m.Var(lb=0, ub=ubdict[i])
    else:
        xp[i] = m.Var(lb=ubdict[i], ub=0)
    m.Equation(xp[i] == c[i]*xd[i])
yp = m.Var()
m.Equation(yp==m.sum([xp[i] for i in range(n)] + [c[n]]))
#Minimize difference between actual and predicted y
m.Minimize((yd-yp)**2)
#APOPT solver
m.options.SOLVER = 1
#Solve
m.solve(disp=True)
#Retrieve parameter values
a = [i.value[0] for i in c]
print(a)
But this is applying the constraint row-wise. What I want is something like
xp[i] = m.Var(lb=0, ub=ubdict[i])
m.Equation(xp[i]==sum(c[i]*xd[i]) over observations)
Any suggestion would be of great help!
Below is a similar problem with sample data.
Regression Mode with IMODE=2
Use the m.vsum() object in Gekko with IMODE=2. Gekko lets you write the equations once and then applies the data to each equation. This is more efficient for large-scale data sets.
import numpy as np
from gekko import GEKKO
# load data
x1 = np.array([1,2,5,3,2,5,2])
x2 = np.array([5,6,7,2,1,3,2])
ym = np.array([3,2,3,5,6,7,8])
# model
m = GEKKO()
c = m.Array(m.FV,3)
for ci in c:
    ci.STATUS = 1
x1 = m.Param(value=x1)
x2 = m.Param(value=x2)
ymeas = m.Param(value=ym)
ypred = m.Var()
m.Equation(ypred == c[0] + c[1]*x1 + c[2]*x2)
# add constraint on sum(c[1]*x1) with vsum
v1 = m.Var(); m.Equation(v1==c[1]*x1)
con = m.Var(lb=0,ub=10); m.Equation(con==m.vsum(v1))
m.Minimize((ypred-ymeas)**2)
m.options.IMODE = 2
m.solve()
print('Final SSE Objective: ' + str(m.options.objfcnval))
print('Solution')
for i,ci in enumerate(c):
    print(i, ci.value[0])
# plot solution
import matplotlib.pyplot as plt
plt.figure(figsize=(8,4))
plt.plot(ymeas.value, ypred.value, 'ro')
plt.plot([0,10],[0,10],'k-')
plt.xlabel('Meas')
plt.ylabel('Pred')
plt.savefig('results.png',dpi=300)
plt.show()
Optimization Mode (IMODE=3)
Optimization mode (IMODE=3) allows you to write each equation and objective term individually. Both modes give the same solution.
import numpy as np
from gekko import GEKKO
# load data
x1 = np.array([1,2,5,3,2,5,2])
x2 = np.array([5,6,7,2,1,3,2])
ym = np.array([3,2,3,5,6,7,8])
n = len(ym)
# model
m = GEKKO()
c = m.Array(m.FV,3)
for ci in c:
    ci.STATUS = 1
yp = m.Array(m.Var,n)
for i in range(n):
    m.Equation(yp[i]==c[0]+c[1]*x1[i]+c[2]*x2[i])
    m.Minimize((yp[i]-ym[i])**2)
# add constraint on sum(c[1]*x1)
s = m.Var(lb=0,ub=10); m.Equation(s==c[1]*sum(x1))
m.options.IMODE = 3
m.solve()
print('Final SSE Objective: ' + str(m.options.objfcnval))
print('Solution')
for i,ci in enumerate(c):
    print(i, ci.value[0])
# plot solution
import matplotlib.pyplot as plt
plt.figure(figsize=(8,4))
ypv = [yp[i].value[0] for i in range(n)]
plt.plot(ym,ypv,'ro')
plt.plot([0,10],[0,10],'k-')
plt.xlabel('Meas')
plt.ylabel('Pred')
plt.savefig('results.png',dpi=300)
plt.show()
For future questions, please create a simple and complete example that demonstrates the issue.

What is the formula being used in the in-sample prediction of statsmodels?

I would like to know what formula is being used in statsmodels ARIMA predict/forecast. For a simple AR(1) model I thought it would be y_t = a1 * y_{t-1}. However, I am not able to recreate the results produced by forecast or predict.
Here's what I am trying to do:
from statsmodels.tsa.arima.model import ARIMA
import numpy as np
def ar_series(n):
    # generate the series y_t = a1 * y_{t-1} + eps
    np.random.seed(1)
    y0 = np.random.rand()
    y = [y0]
    a1 = 0.7  # the AR coefficient
    for i in range(1, n):
        y.append(a1 * y[i-1] + 0.3 * np.random.rand())
    return np.array(y)
series = ar_series(10)
model = ARIMA(series, order=(1, 0, 0))
fit = model.fit()
#print(fit.summary())
# const = 0.3441; ar.L1 = 0.6518
print(fit.predict())
y_pred = [0.3441]
for i in range(1, 10):
    y_pred.append(0.6518 * series[i-1])
y_pred = np.array(y_pred)
print(y_pred)
The two series don't match, and I have no idea how the in-sample predictions are being calculated.
Found the answer here. I think what I was trying to do is valid only if the process mean is zero.
https://faculty.washington.edu/ezivot/econ584/notes/forecast.pdf
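To spell that out: with a nonzero process mean, the one-step-ahead prediction is mean-adjusted, y_hat_t = mu + a1*(y_{t-1} - mu), where the 'const' reported by statsmodels is the process mean mu, not a regression intercept. A minimal check against the code above (a sketch; parameter ordering assumed to be const first, then ar.L1):
mu, a1 = fit.params[0], fit.params[1]  # 'const' is the process mean, then ar.L1
y_pred = [mu]  # the first in-sample prediction is the unconditional mean
for i in range(1, 10):
    y_pred.append(mu + a1 * (series[i-1] - mu))
print(np.array(y_pred))
print(fit.predict())  # should now agree closely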

Python 2.7 - How to compare two images?

In Python 2.7, I want to compare two images to check whether they are the same. How do I do this? Please show me step by step. Thanks!
There are many ways to do this with open-source libraries such as OpenCV, scikit-learn, or TensorFlow.
To compare two images, you can do something like template matching in OpenCV:
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('img.jpg', 0)
img2 = img.copy()
template = cv2.imread('img2.jpg', 0)
w, h = template.shape[::-1]
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED']
for meth in methods:
    img = img2.copy()
    method = eval(meth)
    res = cv2.matchTemplate(img, template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # for SQDIFF methods the best match is the minimum, otherwise the maximum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    plt.subplot(121), plt.imshow(res)
    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(img, cmap='gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
or Histogram comparison
import cv2
import numpy as np
base = cv2.imread('test4.jpg')
test1 = cv2.imread('test3.jpg')
test2 = cv2.imread('test5.jpg')
rows,cols = base.shape[:2]
basehsv = cv2.cvtColor(base,cv2.COLOR_BGR2HSV)
test1hsv = cv2.cvtColor(test1,cv2.COLOR_BGR2HSV)
test2hsv = cv2.cvtColor(test2,cv2.COLOR_BGR2HSV)
halfhsv = basehsv[rows/2:rows-1,cols/2:cols-1].copy() # Take lower half of the base image for testing
hbins = 180
sbins = 256
hrange = [0,180]
srange = [0,256]
ranges = hrange + srange  # ranges = [0,180,0,256]
histbase = cv2.calcHist([basehsv],[0,1],None,[hbins,sbins],ranges)
cv2.normalize(histbase,histbase,0,255,cv2.NORM_MINMAX)
histhalf = cv2.calcHist([halfhsv],[0,1],None,[hbins,sbins],ranges)
cv2.normalize(histhalf,histhalf,0,255,cv2.NORM_MINMAX)
histtest1 = cv2.calcHist([test1hsv],[0,1],None,[hbins,sbins],ranges)
cv2.normalize(histtest1,histtest1,0,255,cv2.NORM_MINMAX)
histtest2 = cv2.calcHist([test2hsv],[0,1],None,[hbins,sbins],ranges)
cv2.normalize(histtest2,histtest2,0,255,cv2.NORM_MINMAX)
for i in xrange(5):
    base_base = cv2.compareHist(histbase,histbase,i)
    base_half = cv2.compareHist(histbase,histhalf,i)
    base_test1 = cv2.compareHist(histbase,histtest1,i)
    base_test2 = cv2.compareHist(histbase,histtest2,i)
    print "Method: {0} -- base-base: {1}, base-half: {2}, base-test1: {3}, base-test2: {4}".format(i, base_base, base_half, base_test1, base_test2)

pymc3 improving theano compile time before sampling

I'm working with this hierarchical Bayesian model:
import pymc3 as pm
import pandas as pd
import theano.tensor as T
categories = pd.Categorical(df.cat)
n_categories = len(set(categories.codes))
cat_idx = categories.codes
with pm.Model():
    mu_a = pm.Normal('mu_a', 0, sd=100**2)
    sig_a = pm.Uniform('sig_a', lower=0, upper=100)
    alpha = pm.Normal('alpha', mu=mu_a, sd=sig_a, shape=n_categories)
    betas = []
    for f in FEATURE_LIST:
        mu_b = pm.Normal('mu_b_%s' % f, 0, sd=100**2)
        sig_b = pm.Uniform('sig_b_%s' % f, lower=0, upper=100)
        betas.append(pm.Normal('beta_%s' % f, mu=mu_b, sd=sig_b, shape=n_categories))
    # inverse-logit of the linear predictor (success probability)
    logit = 1.0 / (1.0 + T.exp(-(
        sum([betas[i][cat_idx] * X_train[f].values for i, f in enumerate(FEATURE_LIST)])
        + alpha[cat_idx]
    )))
    y_est = pm.Bernoulli('y_est', logit, observed=df.y)
    start = pm.find_MAP()
    trace = pm.sample(2000, pm.NUTS(), start=start, random_seed=42, njobs=40)
I would imagine that replacing my Python list of priors and the individual additions and multiplications with proper Theano code (perhaps using T.dot?) would improve the performance of the call to sample. How do I set this up correctly in Theano? I imagine I need something like shape=(n_features, n_categories) for my priors, but I'm not sure how to handle the category index in the dot product.
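A sketch of the vectorized setup the question describes (assuming df, X_train, FEATURE_LIST, cat_idx, and n_categories as defined above): the per-feature hyperpriors become length-n_features vectors that broadcast across categories, and the indexing trick is betas[:, cat_idx], which selects each observation's category column so no explicit dot product over categories is needed.
import numpy as np
import pymc3 as pm

X = X_train[FEATURE_LIST].values   # (n_obs, n_features)
n_features = len(FEATURE_LIST)

with pm.Model():
    mu_a = pm.Normal('mu_a', 0, sd=100**2)
    sig_a = pm.Uniform('sig_a', lower=0, upper=100)
    alpha = pm.Normal('alpha', mu=mu_a, sd=sig_a, shape=n_categories)
    # one coefficient matrix instead of a Python list of per-feature priors
    mu_b = pm.Normal('mu_b', 0, sd=100**2, shape=n_features)
    sig_b = pm.Uniform('sig_b', lower=0, upper=100, shape=n_features)
    betas = pm.Normal('betas', mu=mu_b[:, None], sd=sig_b[:, None],
                      shape=(n_features, n_categories))
    # pick each observation's category column, weight by its features, sum over features
    linear = alpha[cat_idx] + (betas[:, cat_idx] * X.T).sum(axis=0)
    p = pm.math.sigmoid(linear)
    y_est = pm.Bernoulli('y_est', p=p, observed=df.y)
    trace = pm.sample(2000, random_seed=42)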
