I am putting together a numerical problem to use as an example, and I am trying to find an optimal control with GEKKO for the following problem:
minimize the integral of a*x(t) from 0 to T, where T is the first time that x(t) reaches 0, i.e., it is a random time. The constraints are that x(t) follows some dynamics f(x(t), u(t)), x(t) >= 0, and u(t) is between 0 and 1.
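Written out, with T defined as a first hitting time, the problem is:
minimize integral_0^T a*x(t) dt over u(.)
subject to dx/dt = f(x(t), u(t)), x(t) >= 0, 0 <= u(t) <= 1,
T = inf{ t >= 0 : x(t) = 0 }.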
I followed the tutorials on the GEKKO website and on YouTube for a fixed final time, but I could not find any information on a random final time. The following is my current code; how would I move from a fixed final time to a random final time? Any help would be appreciated! Thanks!
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from gekko import GEKKO
# Initial conditions
xhh0 = 3; xhi0 = 0;
xvh0 = 30; xvi0 = 0;
hin0 = 0; vin0 = 0;
tt0 = 0
# Parameters
a1 = 0.1; a2 = 0.1;
b1 = 0.01; b2 = 0.5;
delta1 = 0.1; delta2 = 0.5;
rho1 = 0.3; rho2 = 0.01
mu = 1
# Gekko
m = GEKKO()
# Control variable
u = m.MV(0.5, lb = 0, ub = 1)
# Final time <------------------------ currently a fixed final time
T = 10
# Initialize
xhh, xhi, xvh, xvi, Ah, Av = m.Array(m.Var, 6)
xhh.value = xhh0; xhi.value = xhi0;
xvh.value = xvh0; xvi.value = xvi0;
Ah.value = hin0; Av.value = vin0;
# System dynamics
m.Equations([xhh.dt() == -a1*xhh - mu*u - b1*xhi*xhh,\
xhi.dt() == a1*xhh + b1*xhi*xhh - delta1*xhi - rho1*xhi,\
xvh.dt() == -a2*xvh - mu*(1-u) - b2*xvi*xvh,\
xvi.dt() == a2*xvh + b2*xvi*xvh - delta2*xvi - rho2*xvi,\
Ah.dt() == a1*xhh,\
Av.dt() == a2*xvh])
# Time space
t = np.linspace(0, T, 101)
m.time = t
# initialize with simulation
m.options.IMODE = 7
m.options.NODES = 3
m.solve(disp = False)
# optimization
m.options.IMODE = 6
xhh.LOWER = 0; xhi.LOWER = 0; xvh.LOWER = 0; xvi.LOWER = 0
u.STATUS = 1
m.options.SOLVER = 3
xhh.value = xhh.value.value
xhi.value = xhi.value.value
xvh.value = xvh.value.value
xvi.value = xvi.value.value
Ah.value = Ah.value.value
Av.value = Av.value.value
# Objective function
m.Minimize(Ah + Av)
m.solve()
The final time is adjustable with T = m.FV() and T.STATUS = 1 when each differential equation is divided by T. This scales the problem to an arbitrary final time on the normalized horizon t = np.linspace(0, 1).
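In other words, substituting the scaled time tau = t/T with tau in [0, 1], each equation dx/dt = f(x, u) becomes (1/T)*dx/dtau = f(x, u), so T appears as an ordinary decision variable while the time grid m.time stays fixed.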
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from gekko import GEKKO
# Initial conditions
xhh0 = 3; xhi0 = 0;
xvh0 = 30; xvi0 = 0;
hin0 = 0; vin0 = 0;
tt0 = 0
# Parameters
a1 = 0.1; a2 = 0.1;
b1 = 0.01; b2 = 0.5;
delta1 = 0.1; delta2 = 0.5;
rho1 = 0.3; rho2 = 0.01; mu = 1
# Gekko
m = GEKKO()
# Control variable
u = m.MV(0.5, lb = 0, ub = 1)
# Final time
T = m.FV(10,lb=1e-2,ub=100); T.STATUS = 1
# Initialize
xhh, xhi, xvh, xvi, Ah, Av = m.Array(m.Var, 6)
xhh.value = xhh0; xhi.value = xhi0;
xvh.value = xvh0; xvi.value = xvi0;
Ah.value = hin0; Av.value = vin0;
xhh.LOWER = 0; xhi.LOWER = 0; xvh.LOWER = 0; xvi.LOWER = 0
u.STATUS = 1
# System dynamics
m.Equations([xhh.dt()/T == -a1*xhh - mu*u - b1*xhi*xhh,\
xhi.dt()/T == a1*xhh + b1*xhi*xhh - delta1*xhi - rho1*xhi,\
xvh.dt()/T == -a2*xvh - mu*(1-u) - b2*xvi*xvh,\
xvi.dt()/T == a2*xvh + b2*xvi*xvh - delta2*xvi - rho2*xvi,\
Ah.dt()/T == a1*xhh,\
Av.dt()/T == a2*xvh])
# Time space
t = np.linspace(0, 1, 101)
m.time = t
# optimization
m.options.IMODE = 6
m.options.SOLVER = 3
# Objective function
m.Minimize(Ah + Av)
m.solve()
print('Final time: ', T.value[0])
There may be a missing constraint or some other missing information, because the optimal final time always goes to the lower bound. The Jennings problem is a related example with a variable final time.
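For example, if the intent is that T is the first time a state reaches zero, then T has to be tied to the trajectory through a terminal condition; otherwise nothing stops the solver from shrinking T toward its lower bound. A minimal sketch of the end-point pattern (using xhh purely as an illustration, with an arbitrary 1e-3 tolerance):
p = np.zeros(101); p[-1] = 1.0   # 1 only at the last node of the scaled horizon
final = m.Param(value=p)
# the inequality is active only at the final node, i.e., at t = T
m.Equation(final*xhh <= 1e-3)
Recent GEKKO versions also provide m.fix_final() as a convenience for fixing a variable's value at the end of the horizon.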
I am getting an error when I run this code for the disc waves. The code is attached.
The error is on lines 137 and 292. Please help in resolving the issue.
function waves
% WAVES Wave equation in one and two space dimensions.
% The two-dimensional domains include a pi-by-pi square, a unit disc,
% a three-quarter circular sector and the L-shaped union of three squares.
% The eigenfunctions of the square are sin(m*x)*sin(n*y). With polar
% coordinates, the eigenfunctions of the disc and the sector involve Bessel
% functions. The eigenfunctions of the L-shaped domain also involve
% Bessel functions and are computed by the MATLAB function membranetx.m.
% 2-D eigenvalues and eigenfunctions
m = 11; % Determines number of grid points
speed = 1;
bvals = [1; 0; 0; 0; 0];
t = 0;
while bvals(5) == 0
% Initialize figure
shg
clf reset
set(gcf,'doublebuffer','on','menubar','none','tag','', ...
'numbertitle','off','name','Waves','colormap',hot(64));
for k= 1:5
b(k) = uicontrol('style','toggle','value',bvals(k), ...
'units','normal','position',[.15*k .01 .14 .05]);
end
set(b(1),'style','pop','string', ...
{'1-d','square','disc','sector'})
set(b(2),'string','modes/wave')
set(b(3),'string','slower')
set(b(4),'string','faster')
set(b(5),'string','close')
if bvals(3)==1
speed = speed/sqrt(2);
set(b(3),'value',0);
end
if bvals(4)==1
speed = speed*sqrt(2);
set(b(4),'value',0);
end
bvals = cell2mat(get(b,'value'));
region = bvals(1);
modes = bvals(2)==0;
if region == 1
% 1-D
x = (0:4*m)/(4*m)*pi;
orange = [1 1/3 0];
gray = get(gcf,'color');
if modes
% 1-D modes
for k = 1:4
subplot(2,2,k)
h(k) = plot(x,zeros(size(x)));
axis([0 pi -3/2 3/2])
set(h(k),'color',orange,'linewidth',3)
set(gca,'color',gray','xtick',[],'ytick',[])
end
delta = 0.005*speed;
bvs = bvals;
while all(bvs == bvals)
t = t + delta;
for k = 1:4
u = sin(k*t)*sin(k*x);
set(h(k),'ydata',u)
end
drawnow
bvs = cell2mat(get(b,'value'));
end
else
% 1-D wave
h = plot(x,zeros(size(x)));
axis([0 pi -9/4 9/4])
set(h,'color',orange,'linewidth',3)
set(gca,'color',gray','xtick',[],'ytick',[])
delta = 0.005*speed;
a = 1./(1:4);
bvs = bvals;
while all(bvs == bvals)
t = t + delta;
u = zeros(size(x));
for k = 1:4
u = u + a(k)*sin(k*t)*sin(k*x);
end
set(h,'ydata',u)
drawnow
bvs = cell2mat(get(b,'value'));
end
end
elseif region <= 5
switch region
case 2
% Square
x = (0:2*m)/(2*m)*pi;
y = x';
lambda = zeros(4,1);
V = cell(4,1);
k = 0;
for i = 1:2
for j = 1:2
k = k+1;
lambda(k) = i^2 + j^2;
V{k} = sin(i*y)*sin(j*x);
end
end
ax = [0 pi 0 pi -1.75 1.75];
case 3
% Disc, mu = zeros of J_0(r) and J_1(r)
mu = [bjzeros(0,2) bjzeros(1,2)];
[r,theta] = meshgrid((0:m)/m,(-m:m)/m*pi);
x = r.*cos(theta);
y = r.*sin(theta);
V = cell(4,1);
k = 0;
for j = 0:1
for i = 1:2
k = k+1;
if j == 0
V{k} = besselj(0,mu(k)*r);
else
V{k} = besselj(j,mu(k)*r).*sin(j*theta);
end
V{k} = V{k}/max(max(abs(V{k})));
end
end
lambda = mu.^2;
ax = [-1 1 -1 1 -1.75 1.75];
case 4
% Circular sector , mu = zeros of J_(2/3)(r) and J_(4/3)(r)
mu = [bjzeros(2/3,2) bjzeros(4/3,2)];
[r,theta] = meshgrid((0:m)/m,(3/4)*(0:2*m)/m*pi);
x = r.*cos(theta+pi);
y = r.*sin(theta+pi);
V = cell(4,1);
k = 0;
for j = 1:2
for i = 1:2
k = k+1;
alpha = 2*j/3;
V{k} = besselj(alpha,mu(k)*r).*sin(alpha*theta);
V{k} = V{k}/max(max(abs(V{k})));
end
end
lambda = mu.^2;
ax = [-1 1 -1 1 -1.75 1.75];
case 5
% L-membrane
x = (-m:m)/m;
y = x';
lambda = zeros(4,1);
V = cell(4,1);
for k = 1:4
[L lambda(k)] = membranetx(k,m,9,9);
L(m+2:2*m+1,m+2:2*m+1) = NaN;
V{k} = rot90(L,-1);
end
ax = [-1 1 -1 1 -1.75 1.75];
end
if modes
% 2-D modes
p = [.02 .52 .02 .52];
q = [.52 .52 .02 .02];
for k = 1:4
axes('position',[p(k) q(k) .46 .46]);
h(k) = surf(x,y,zeros(size(V{k})));
axis(ax)
axis off
view(225,30);
caxis([-1.5 1]);
end
delta = .08*speed;
mu = sqrt(lambda(:));
bvs = bvals;
while all(bvs == bvals)
t = t + delta;
for k = 1:4
U = 1.5*sin(mu(k)*t)*V{k};
set(h(k),'zdata',U)
set(h(k),'cdata',U)
end
drawnow
bvs = cell2mat(get(b,'value'));
end
else
% 2-D wave
h = surf(x,y,zeros(size(V{1})));
axis(ax);
axis off
view(225,30);
caxis([-1.5 1]);
delta = .02*speed;
mu = sqrt(lambda(:));
a = 1.25./(1:4);
bvs = bvals;
while all(bvs == bvals)
t = t + delta;
U = zeros(size(V{1}));
for k = 1:4
U = U + a(k)*sin(mu(k)*t)*V{k};
end
set(h,'zdata',U)
set(h,'cdata',U)
drawnow
bvs = cell2mat(get(b,'value'));
end
end
elseif region == 6
figure
bizcard
set(b(1),'value',1)
end
% Retain uicontrol values
bvals = cell2mat(get(b,'value'));
end
close
% -------------------------------
function z = bjzeros(n,k)
% BJZEROS Zeros of the Bessel function.
% z = bjzeros(n,k) is the first k zeros of besselj(n,x)
% delta must be chosen so that the linear search can take
% steps as large as possible without skipping any zeros.
% delta is approx bjzero(0,2)-bjzero(0,1)
delta = .99*pi;
Jsubn = inline('besselj(n,x)','x','n');
a = n+1;
fa = besselj(n,a);
z = zeros(1,k);
j = 0;
while j < k
b = a + delta;
fb = besselj(n,b);
if sign(fb) ~= sign(fa)
j = j+1;
z(j) = fzerotx(Jsubn,[a b],n);
end
a = b;
fa = fb;
end
I'm trying to develop the adaptive unsharp algorithm described by Polesel et al. in the article "Image Enhancement via Adaptive Unsharp Masking" (link to the article). The core of the algorithm is the minimization of a cost function defined as:
J(m,n) = E[e(m,n)^2] = E[(gd(m,n)-gy(m,n))^2]
where E[] is the statistical expectation and gy(m,n) is:
gy(m,n) = gx(m,n) + lambda1(m,n)*gzx(m,n) + lambda2(m,n)*gzy(m,n);
I want to find lambda1 and lambda2 for each pixel in order to minimize the cost function in each pixel.
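Since J is quadratic in (lambda1, lambda2), setting its gradient to zero at each pixel gives, at least in principle, a 2x2 linear system (with the expectations estimated over a local window); this is just the standard least-squares condition, not the adaptation scheme from the paper:
E[gzx^2]*lambda1 + E[gzx*gzy]*lambda2 = E[gzx*(gd - gx)]
E[gzx*gzy]*lambda1 + E[gzy^2]*lambda2 = E[gzy*(gd - gx)]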
Here is the code that I have written so far:
function [ o_sharpened_image ] = AdaptativeUnsharpMask( i_image , t1, t2)
%ADAPTATIVEUNSHARPMASK Summary of this function goes here
% Detailed explanation goes here
if isa(i_image,'dip_image')
i_image = dip_array(i_image);
end
if ~isfloat(i_image)
i_image = im2double(i_image);
end
adh = 4;
adl = 3;
g = [-1 -1 -1; -1 8 -1; -1 -1 -1];
dim = size(i_image);
lambda_x = 0.5*ones(dim);
lambda_y = 0.5*ones(dim);
z_x = conv2(i_image,[-1 2 -1],'same');
z_y = conv2(i_image,[-1; 2; -1],'same');
g_x = conv2(i_image,g,'same');
g_zx = conv2(z_x,g,'same');
g_zy = conv2(z_y,g,'same');
a = ones(dim);
variance_map = colfilt(i_image,[3 3],'sliding',@var);
a(variance_map >= t1 & variance_map < t2) = adh;
a(variance_map >= t2) = adl;
g_d = a.*g_x;
lambda = [lambda_x lambda_y];
lambda0 = lambda;
lambda_min = lsqnonlin(@(lambda) UnsharpCostFunction(lambda,g_d,g_zx,g_zy),lambda0);
o_sharpened_image = i_image + lambda_min(:,1:size(i_image,2)).*z_x + lambda_min(:,size(i_image,2)+1:end).*z_y;
end
Here is the code of the cost function:
function [ J ] = UnsharpCostFunction( i_lambda, i_gd, i_gzx, i_gzy )
%UNSHARPCOSTFUNCTION Summary of this function goes here
gy = i_gd + i_lambda(:,1:size(i_gd,2)).*i_gzx + i_lambda(:,size(i_gd,2)+1:end).*i_gzy;
J = mean((i_gd(:) - gy(:)).^2);
end
On each iteration I print the value of the J function to the command window, and it is always the same. What am I doing wrong?
Thank you.
correlation = zeros(length(s1), 1);
sizeNum = 0;
for i = 1 : length(s1) - windowSize - delta
s1Dat = s1(i : i + windowSize);
s2Dat = s2(i + delta : i + delta + windowSize);
if length(find(isnan(s1Dat))) == 0 && length(find(isnan(s2Dat))) == 0
if(var(s1Dat) ~= 0 || var(s2Dat) ~= 0)
sizeNum = sizeNum + 1;
correlation(i) = abs(corr(s1Dat, s2Dat)) ^ 2;
end
end
end
What's happening here:
Run through every value in s1. For each index i, take the slice of s1 from i to i + windowSize.
Do the same for s2, only take the slice offset by an intermediate delta.
If there are no NaNs in either of the two slices and they aren't flat, compute the correlation between them and add it to the correlation array.
This is not an answer; I am just trying to understand what is being asked.
Take some data:
N = 1e4;
s1 = cumsum(randn(N, 1)); s2 = cumsum(randn(N, 1));
s1(randi(N, 50, 1)) = NaN; s2(randi(N, 50, 1)) = NaN;
windowSize = 200; delta = 100;
Compute correlations:
tic
corr_s = zeros(N - windowSize - delta, 1);
for i = 1:(N - windowSize - delta)
s1Dat = s1(i:(i + windowSize));
s2Dat = s2((i + delta):(i + delta + windowSize));
corr_s(i) = corr(s1Dat, s2Dat);
end
inds = isnan(corr_s);
corr_s(inds) = 0;
corr_s = corr_s .^ 2; % square of correlation coefficient??? Why?
sizeNum = sum(~inds);
toc
This is what you want to do, right? A moving window correlation function? This is a very interesting question indeed …
I'm using MATLAB to implement a multilayer neural network. In the code I represent
the value of each node as netValue{k}
the weight between layer k and k + 1 as weight{k}
etc.
Since these data are three-dimensional, I have to use cell arrays to hold 2-D matrices so that I can use matrix multiplication.
Training the model has become really slow, which I suspect is caused by the use of cell arrays.
Can anyone tell me how to speed up this code? Thanks.
clc;
close all;
clear all;
input = [-2 : 0.4 : 2;-2:0.4:2];
ican = 4;
depth = 4; % total layers - 1, by convention
[featureNum , sampleNum] = size(input);
levelNum(1) = featureNum;
levelNum(2) = 5;
levelNum(3) = 5;
levelNum(4) = 5;
levelNum(5) = 2;
weight = cell(0);
for k = 1 : depth
weight{k} = rand(levelNum(k+1), levelNum(k)) - 2 * rand(levelNum(k+1) , levelNum(k));
threshold{k} = rand(levelNum(k+1) , 1) - 2 * rand(levelNum(k+1) , 1);
end
runCount = 0;
sumMSE = 1; % init MSE
minError = 1e-5;
afa = 0.1; % step size for gradient descent
% training loop
while (runCount < 100000 && sumMSE > minError)
sumMSE = 0; % sum of MSE
for i = 1 : sampleNum % sample loop
netValue{1} = input(:,i);
for k = 2 : depth
netValue{k} = weight{k-1} * netValue{k-1} + threshold{k-1}; %calculate each layer
netValue{k} = 1 ./ (1 + exp(-netValue{k})); %apply logistic function
end
netValue{depth+1} = weight{depth} * netValue{depth} + threshold{depth}; %output layer
e = 1 + sin((pi / 4) * ican * netValue{1}) - netValue{depth + 1}; %calc error
assistS{depth} = diag(ones(size(netValue{depth+1})));
s{depth} = -2 * assistS{depth} * e;
for k = depth - 1 : -1 : 1
assistS{k} = diag((1-netValue{k+1}).*netValue{k+1});
s{k} = assistS{k} * weight{k+1}' * s{k+1};
end
for k = 1 : depth
weight{k} = weight{k} - afa * s{k} * netValue{k}';
threshold{k} = threshold{k} - afa * s{k};
end
sumMSE = sumMSE + e' * e;
end
sumMSE = sqrt(sumMSE) / sampleNum;
runCount = runCount + 1;
end
x = [-2 : 0.1 : 2;-2:0.1:2];
y = zeros(size(x));
z = 1 + sin((pi / 4) * ican .* x);
% test
for i = 1 : length(x)
netValue{1} = x(:,i);
for k = 2 : depth
netValue{k} = weight{k-1} * netValue{k-1} + threshold{k-1};
netValue{k} = 1 ./ ( 1 + exp(-netValue{k}));
end
y(:, i) = weight{depth} * netValue{depth} + threshold{depth};
end
plot(x(1,:) , y(1,:) , 'r');
hold on;
plot(x(1,:) , z(1,:) , 'g');
hold off;
Have you used the profiler to find out what functions are actually slowing down your code? It shows what lines take the most time to execute.