This source code is an implementation of the epsilon-constraint method. How can I replace the fmincon() function with a PSO or GA optimization algorithm? (I do not want to use a built-in function.)
This is the code for the main function:
x0 = [1 1]; % Starting point
UB = [1 1]; % Upper bound
LB = [0 0]; % Lower bound
options = optimset('LargeScale', 'off', 'MaxFunEvals', 1000, ...
'TolFun', 1e-6, 'TolCon', 1e-6, 'disp', 'off');
% Create constraint bound vector:
n = 50; % Number of Pareto points
eps_min = -1;
eps_max = 0;
eps = eps_min:(eps_max - eps_min)/(n-1):eps_max;
% Solve scalarized problem for each epsilon value:
xopt = zeros(n,length(x0));
for i=1:n
xopt(i,:)=fmincon('obj_eps', x0, [], [], [], [], LB, UB,...
'nonlcon_eps', options, eps(i));
end
This is the constraints function:
function [C,constraintViolation] = nonlcon_eps(x, eps)
constraintViolation= 0;
Ceq = [];
C(1) =x(2)+(x(1)-1)^3;
if C(1) > 0
constraintViolation= constraintViolation+ 1;
end
C(2) = -x(1) - eps;
if C(2) > 0
constraintViolation= constraintViolation+ 1;
end
This is the objective function:
function f = obj_eps(x, ~)
f = 2*x(1)-x(2);
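In other words, for each value eps(i) the scalarized problem being solved is: minimize 2*x1 - x2 over 0 <= x1, x2 <= 1, subject to x2 + (x1 - 1)^3 <= 0 and -x1 <= eps(i) (i.e. x1 >= -eps(i)).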
I have replaced this part:
for i=1:n
xopt(i,:)=fmincon('obj_eps', x0, [], [], [], [], LB, UB,'nonlcon_eps', options, eps(i));
end
with this:
maxIteration = 1000;
dim = 2;
n = 50; % Number of Pareto points
eps_min = -1;
eps_max = 0;
EpsVal = eps_min:(eps_max - eps_min)/(n-1):eps_max;
for i=1:n
[gbest]= PSOalgo(N,T,lb,ub,dim,fobj,fcon,EpsVal(i));
end
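% (For context: N, T, lb, ub, fobj and fcon above are assumed to be set up
%  beforehand, e.g. N = swarm size, T = maxIteration, lb/ub = LB/UB,
%  fobj = @obj_eps and fcon = @nonlcon_eps.)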
function [gbest]= PSOalgo(N,maxite,lb,ub,dim,fobj,fcon,EpsVal)
% initialization
wmax=0.9; % inertia weight
wmin=0.4; % inertia weight
c1=2; % acceleration factor
c2=2; % acceleration factor
% pso initialization
X=initialization(N,dim,ub,lb);
v = 0.1*X; % initial velocity
for i=1:N
fitnessX(i,1)= fobj(X(i,:));
end
[fmin0,index0]= min(fitnessX);
pbest= X; % initial pbest
pbestfitness = fitnessX;
gbest= X(index0,:); % initial gbest
gbestfitness = fmin0;
ite=0; % Loop counter
while ite<maxite
w=wmax-(wmax-wmin)*ite/maxite; % update inertial weight
% pso velocity updates
for i=1:N
for j=1:dim
v(i,j)=w*v(i,j)+c1*rand()*(pbest(i,j)- X(i,j)) + c2*rand()*(gbest(1,j)- X(i,j));
end
end
% pso position update
for i=1:N
for j=1:dim
X(i,j)= X(i,j)+v(i,j);
end
% Check boundaries
FU=X(i,:)>ub;
FL=X(i,:)<lb;
X(i,:)=(X(i,:).*(~(FU+FL)))+ub.*FU+lb.*FL;
% evaluating fitness
fitnessX(i,1) = fobj(X(i,:));
[~,constraintViolation(i,1)] = fcon(X(i,:), EpsVal);
end
% updating pbest and fitness
for i=1:N
if fitnessX(i,1) < pbestfitness(i,1) && constraintViolation(i,1) == 0
pbest(i,:)= X(i,:);
pbestfitness(i,1)= fitnessX(i,1);
end
[~,constraintViolation(i,1)] = fcon(pbest(i,:), EpsVal);
end
% updating gbest and best fitness
for i=1:N
if pbestfitness(i,1)<gbestfitness && constraintViolation(i,1) == 0
gbest=pbest(i,:);
gbestfitness= pbestfitness(i,1);
end
end
ite = ite+1;
end
end
However, when I run the code the output is only one solution, while it should be 50 (n = 50). This is because all the other solutions do not satisfy the constraints. How can I modify the code to get one solution for each run (i = 1:50)?
UPDATE:
I have included the PSOalgo() code and made some modifications. The output is now 50 solutions. However, the obtained results are not correct.
fmincon() result:
PSO result:
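One common way to handle the constraints in a hand-written PSO like this is to fold the constraint violation into the fitness as a penalty before the pbest/gbest comparisons, so feasible and infeasible particles can be compared on a single number. The following is only a sketch of that idea (not a verified fix for this exact code, and the penalty weight is an arbitrary choice); it would replace the fitness-evaluation part of the loop, with fobj and fcon assumed to be handles to obj_eps and nonlcon_eps:
% Sketch: penalised fitness evaluation for the scalarised problem
penaltyWeight = 1e6;                          % arbitrary large weight (assumption)
for i = 1:N
    fitnessX(i,1) = fobj(X(i,:));
    C = fcon(X(i,:), EpsVal);                 % first output: inequality constraint values
    fitnessX(i,1) = fitnessX(i,1) + penaltyWeight * sum(max(C, 0).^2);
end
% The pbest/gbest updates then simply compare fitnessX, so infeasible
% particles are strongly disfavoured but can still guide the search.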
Does this code have mutation, selection, and crossover, just like the original genetic algorithm? Since this is a hybrid algorithm (i.e., PSO with GA), does it use all the steps of the original GA, or does it skip some of them? Please do tell me.
I am just new to this and still trying to understand. Thank you.
%%% Hybrid GA and PSO code
function [gbest, gBestScore, all_scores] = QAP_PSO_GA(CreatePopFcn, FitnessFcn, UpdatePosition, ...
nCity, nPlant, nPopSize, nIters)
% Set algorithm parameters
constant = 0.95;
c1 = 1.5; %1.4944; %2;
c2 = 1.5; %1.4944; %2;
w = 0.792 * constant;
% Allocate memory and initialize
gBestScore = inf;
all_scores = inf * ones(nPopSize, nIters);
x = CreatePopFcn(nPopSize, nCity);
v = zeros(nPopSize, nCity);
pbest = x;
% update lbest
cost_p = inf * ones(1, nPopSize); %feval(FUN, pbest');
for i=1:nPopSize
cost_p(i) = FitnessFcn(pbest(i, 1:nPlant));
end
lbest = update_lbest(cost_p, pbest, nPopSize);
for iter = 1 : nIters
if mod(iter,1000) == 0
parents = randperm(nPopSize);
for i = 1:nPopSize
x(i,:) = (pbest(i,:) + pbest(parents(i),:))/2;
% v(i,:) = pbest(parents(i),:) - x(i,:);
% v(i,:) = (v(i,:) + v(parents(i),:))/2;
end
else
% Update velocity
v = w*v + c1*rand(nPopSize,nCity).*(pbest-x) + c2*rand(nPopSize,nCity).*(lbest-x);
% Update position
x = x + v;
x = UpdatePosition(x);
end
% Update pbest
cost_x = inf * ones(1, nPopSize);
for i=1:nPopSize
cost_x(i) = FitnessFcn(x(i, 1:nPlant));
end
s = cost_x<cost_p;
cost_p = (1-s).*cost_p + s.*cost_x;
s = repmat(s',1,nCity);
pbest = (1-s).*pbest + s.*x;
% update lbest
lbest = update_lbest(cost_p, pbest, nPopSize);
% update global best
all_scores(:, iter) = cost_x;
[cost,index] = min(cost_p);
if (cost < gBestScore)
gbest = pbest(index, :);
gBestScore = cost;
end
% draw current fitness
figure(1);
plot(iter,min(cost_x),'cp','MarkerEdgeColor','k','MarkerFaceColor','g','MarkerSize',8)
hold on
str=strcat('Best fitness: ', num2str(min(cost_x)));
disp(str);
end
end
% Function to update lbest
function lbest = update_lbest(cost_p, x, nPopSize)
sm(1, 1)= cost_p(1, nPopSize);
sm(1, 2:3)= cost_p(1, 1:2);
[cost, index] = min(sm);
if index==1
lbest(1, :) = x(nPopSize, :);
else
lbest(1, :) = x(index-1, :);
end
for i = 2:nPopSize-1
sm(1, 1:3)= cost_p(1, i-1:i+1);
[cost, index] = min(sm);
lbest(i, :) = x(i+index-2, :);
end
sm(1, 1:2)= cost_p(1, nPopSize-1:nPopSize);
sm(1, 3)= cost_p(1, 1);
[cost, index] = min(sm);
if index==3
lbest(nPopSize, :) = x(1, :);
else
lbest(nPopSize, :) = x(nPopSize-2+index, :);
end
end
If you are new to optimization, I recommend that you first study each algorithm separately; then you can study how GA and PSO may be combined. You will also need basic mathematical skills in order to understand the operators of the two algorithms and to test the efficiency of these algorithms (this is what really matters).
This code chunk is responsible for parent selection and crossover:
parents = randperm(nPopSize);
for i = 1:nPopSize
x(i,:) = (pbest(i,:) + pbest(parents(i),:))/2;
% v(i,:) = pbest(parents(i),:) - x(i,:);
% v(i,:) = (v(i,:) + v(parents(i),:))/2;
end
It is not really obvious how the selection with randperm is done (I have no experience with MATLAB).
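For what it is worth, randperm(nPopSize) simply returns a random permutation of the integers 1..nPopSize, so each particle i is paired with a randomly chosen partner parents(i) and the new position is the arithmetic mean of the two pbest positions. A small illustration (the values are only an example):
% Illustration only: the random pairing used for the crossover step
nPopSize = 4;
parents  = randperm(nPopSize);   % e.g. [3 1 4 2] -- a random permutation of 1:4
% particle 1 is then averaged with pbest(parents(1),:) = pbest(3,:),
% particle 2 with pbest(1,:), and so on.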
And this is the code that is responsible for updating the velocity and position of each particle:
% Update velocity
v = w*v + c1*rand(nPopSize,nCity).*(pbest-x) + c2*rand(nPopSize,nCity).*(lbest-x);
% Update position
x = x + v;
x = UpdatePosition(x);
This version of the velocity-updating strategy uses what is called the inertia weight w, which basically means we are preserving the velocity history of each particle (not completely recomputing it).
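Written out per particle i and dimension j, this is the standard inertia-weight update: v(i,j) <- w*v(i,j) + c1*r1*(pbest(i,j) - x(i,j)) + c2*r2*(lbest(i,j) - x(i,j)), with r1 and r2 drawn uniformly from [0,1] at every update; the first term is the preserved velocity history, scaled by w.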
It is worth mentioning that the velocity update is performed much more often than crossover (crossover only happens every 1000 iterations).
I'm doing this exercise by Andrew Ng about using k-means to reduce the number of colors in an image. It works correctly, but I'm afraid it's a little slow because of all the for loops in the code, so I'd like to vectorize them. But there are some loops that I just can't seem to vectorize effectively. Please help me, thank you very much!
Also if possible please give some feedback on my coding style :)
Here is the link of the exercise, and here is the dataset.
The correct result is given in the link of the exercise.
And here is my code:
function [] = KMeans()
Image = double(imread('bird_small.tiff'));
[rows,cols, RGB] = size(Image);
Points = reshape(Image,rows * cols, RGB);
K = 16;
Centroids = zeros(K,RGB);
s = RandStream('mt19937ar','Seed',0);
% Initialization :
% Pick out K random colours and make sure they are all different
% from each other! This prevents the situation where two of the means
% are assigned to the exact same colour, therefore we don't have to
% worry about division by zero in the E-step
% However, if K = 16 for example, and there are only 15 colours in the
% image, then this while loop will never exit!!! This needs to be
% addressed in the future :(
% TODO : Vectorize this part!
done = false;
while done == false
RowIndex = randperm(s,rows);
ColIndex = randperm(s,cols);
RowIndex = RowIndex(1:K);
ColIndex = ColIndex(1:K);
for i = 1 : K
for j = 1 : RGB
Centroids(i,j) = Image(RowIndex(i),ColIndex(i),j);
end
end
Centroids = sort(Centroids,2);
Centroids = unique(Centroids,'rows');
if size(Centroids,1) == K
done = true;
end
end;
% imshow(imread('bird_small.tiff'))
%
% for i = 1 : K
% hold on;
% plot(RowIndex(i),ColIndex(i),'r+','MarkerSize',50)
% end
eps = 0.01; % Epsilon
IterNum = 0;
while 1
% E-step: Estimate membership given parameters
% Membership: The centroid that each colour is assigned to
% Parameters: Location of centroids
Dist = pdist2(Points,Centroids,'euclidean');
[~, WhichCentroid] = min(Dist,[],2);
% M-step: Estimate parameters given membership
% Membership: The centroid that each colour is assigned to
% Parameters: Location of centroids
% TODO: Vectorize this part!
OldCentroids = Centroids;
for i = 1 : K
PointsInCentroid = Points((find(WhichCentroid == i))',:);
NumOfPoints = size(PointsInCentroid,1);
% Note that NumOfPoints is never equal to 0, as a result of
% the initialization. Or .... ???????
if NumOfPoints ~= 0
Centroids(i,:) = sum(PointsInCentroid , 1) / NumOfPoints ;
end
end
% Check for convergence: Here we use the L2 distance
IterNum = IterNum + 1;
Margins = sqrt(sum((Centroids - OldCentroids).^2, 2));
if sum(Margins > eps) == 0
break;
end
end
IterNum;
Centroids ;
% Load the larger image
[LargerImage,ColorMap] = imread('bird_large.tiff');
LargerImage = double(LargerImage);
[largeRows,largeCols,NewRGB] = size(LargerImage); % RGB is always 3
% TODO: Vectorize this part!
largeRows
largeCols
NewRGB
% Replace each of the pixel with the nearest centroid
NewPoints = reshape(LargerImage,largeRows * largeCols, NewRGB);
Dist = pdist2(NewPoints,Centroids,'euclidean');
[~,WhichCentroid] = min(Dist,[],2);
NewPoints = Centroids(WhichCentroid,:);
LargerImage = reshape(NewPoints,largeRows,largeCols,NewRGB);
% for i = 1 : largeRows
% for j = 1 : largeCols
% Dist = pdist2(Centroids,reshape(LargerImage(i,j,:),1,RGB),'euclidean');
% [~,WhichCentroid] = min(Dist);
% LargerImage(i,j,:) = Centroids(WhichCentroid,:);
% end
% end
% Display new image
imshow(uint8(round(LargerImage)),ColorMap)
UPDATE: Replaced
for i = 1 : K
for j = 1 : RGB
Centroids(i,j) = Image(RowIndex(i),ColIndex(i),j);
end
end
with
for i = 1 : K
Centroids(i,:) = Image(RowIndex(i),ColIndex(i),:);
end
I think this may be vectorized further by using linear indexing, but for now I should just focus on the while loop since it takes most of the time.
Also when I tried @Dev-iL's suggestion and replaced
for i = 1 : K
PointsInCentroid = Points((find(WhichCentroid == i))',:);
NumOfPoints = size(PointsInCentroid,1);
% Note that NumOfPoints is never equal to 0, as a result of
% the initialization. Or .... ???????
if NumOfPoints ~= 0
Centroids(i,:) = sum(PointsInCentroid , 1) / NumOfPoints ;
end
end
with
E = sparse(1:size(WhichCentroid), WhichCentroid' , 1, Num, K, Num);
Centroids = (E * spdiags(1./sum(E,1)',0,K,K))' * Points ;
the results were always worse: with K = 16, the first takes 2.414 s and the second takes 2.455 s; with K = 32, the first takes 4.529 s and the second takes 5.022 s. It seems like vectorization does not help, but maybe there's something wrong with my code :( .
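As an aside (my own note, not part of the original suggestion), the M-step can also be vectorized with accumarray instead of building a sparse indicator matrix. A sketch using the same variable names as above:
% Sketch: vectorized M-step with accumarray
counts = accumarray(WhichCentroid, 1, [K 1]);          % number of points per centroid
sums   = zeros(K, RGB);
for j = 1:RGB                                          % only 3 iterations (R, G, B)
    sums(:, j) = accumarray(WhichCentroid, Points(:, j), [K 1]);
end
nonEmpty = counts > 0;                                 % keep the old centroid if a cluster is empty
Centroids(nonEmpty, :) = bsxfun(@rdivide, sums(nonEmpty, :), counts(nonEmpty));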
However, when I replaced
Dist = pdist2(Points,Centroids,'euclidean');
[~, WhichCentroid] = min(Dist,[],2);
(in the while loop) with
Dist = bsxfun(@minus,dot(Centroids',Centroids',1)' / 2 , Centroids * Points' );
[~, WhichCentroid] = min(Dist,[],1);
WhichCentroid = WhichCentroid';
the code ran much faster, especially when K is large (K=32)
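For the record, here is why this works (my own note): ||p - c||^2 = ||p||^2 - 2*dot(p,c) + ||c||^2, and for a fixed point p the ||p||^2 term is the same for every centroid, so minimizing ||p - c||^2 over the centroids is the same as minimizing ||c||^2/2 - dot(c,p), which is exactly what the bsxfun expression computes. The values in Dist are therefore not true distances, but the minimizing index (WhichCentroid) is the same, and the computation reduces to one matrix product.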
Thank you everyone!
Could someone please run this for me and tell me how long it takes for you? It took my laptop 60s. I can't tell if it's my laptop that's crappy or my code. Probably both.
I just started learning MATLAB, so I'm not yet familiar with which functions are better than others for specific tasks. If you have any suggestions on how I could improve this code, they would be greatly appreciated.
function gbp
clear; clc;
zi = 0; % initial position
zf = 100; % final position
Ei = 1; % initial electric field
c = 3*10^8; % speed of light
epsilon = 8.86*10^-12; % permittivity of free space
lambda = 1064*10^-9; % wavelength
k = 2*pi/lambda; % wave number
wi = 1.78*10^-3; % initial waist width (minimum spot size)
zr = (pi*wi^2)/lambda; % Rayleigh range
Ri = zi + zr^2/zi; % initial radius of curvature
qi = 1/(1/Ri-1i*lambda/(pi*wi^2)); % initial complex beam parameter
Psii = atan(real(qi)/imag(qi)); % Gouy phase
mat = [1 zf; 0 1]; % transformation matrix
A = mat(1,1); B = mat(1,2); C = mat(2,1); D = mat(2,2);
qf = (A*qi + B)/(C*qi + D); % final complex beam parameter
wf = sqrt(-lambda/pi*(1/imag(1/qf))); % final spot size
Rf = 1/real(1/qf); % final radius of curvature
Psif = atan(real(qf)/imag(qf)); % final Gouy phase
% Hermite - Gaussian modes function
u = @(z, x, n, w, R, Psi) (2/pi)^(1/4)*sqrt(exp(1i*(2*n+1)*Psi)/(2^n*factorial(n)*w))*...
hermiteH(n,sqrt(2)*x/w).*exp(-x.^2*(1/w^2+1i*k/(2*R))-1i*k*z);
% Complex amplitude coefficients function
a = @(n) exp(1i*k*zi)*integral(@(x) Ei.*conj(u(zi, x, n, wi, Ri, Psii)),-2*wi,2*wi);
%----------------------------------------------------------------------------
xlisti = -0.1:1/10000:0.1; % initial x-axis range
xlistf = -0.1:1/10000:0.1; % final x-axis range
nlist = 0:2:20; % modes range
function Eiplot
Efieldi = zeros(size(xlisti));
for nr = nlist
Efieldi = Efieldi + a(nr).*u(zi, xlisti, nr, wi, Ri, Psii)*exp(-1i*k*zi);
end
Ii = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldi);
end
function Efplot
Efieldf = zeros(size(xlistf));
for nr = nlist
Efieldf = Efieldf + a(nr).*u(zf, xlistf, nr, wf, Rf, Psif)*exp(-1i*k*zf);
end
If = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldf);
end
Eiplot
Efplot
plot(xlisti,real(Ii),xlistf,real(If))
xlabel('x(m)') % x-axis label
ylabel('I(W/m^2)') % y-axis label
end
The cost is coming from the calls to hermiteH: for every call, this creates a new function using symbolic variables and then evaluates that function at your input. The key to speeding this up is to pre-compute the Hermite polynomial functions and then evaluate those, rather than creating them from scratch each time (speedup from ~26 seconds to around 0.75 seconds on my computer).
With the changes:
function gbp
x = sym('x');
zi = 0; % initial position
zf = 100; % final position
Ei = 1; % initial electric field
c = 3*10^8; % speed of light
epsilon = 8.86*10^-12; % permittivity of free space
lambda = 1064*10^-9; % wavelength
k = 2*pi/lambda; % wave number
wi = 1.78*10^-3; % initial waist width (minimum spot size)
zr = (pi*wi^2)/lambda; % Rayleigh range
Ri = zi + zr^2/zi; % initial radius of curvature
qi = 1/(1/Ri-1i*lambda/(pi*wi^2)); % initial complex beam parameter
Psii = atan(real(qi)/imag(qi)); % Gouy phase
mat = [1 zf; 0 1]; % transformation matrix
A = mat(1,1); B = mat(1,2); C = mat(2,1); D = mat(2,2);
qf = (A*qi + B)/(C*qi + D); % final complex beam parameter
wf = sqrt(-lambda/pi*(1/imag(1/qf))); % final spot size
Rf = 1/real(1/qf); % final radius of curvature
Psif = atan(real(qf)/imag(qf)); % final Gouy phase
% Hermite - Gaussian modes function
nlist = 0:2:20; % modes range
% precompute hermite polynomials for nlist
hermites = {};
for n = nlist
if n == 0
hermites{n + 1} = @(x)1.0;
else
hermites{n + 1} = matlabFunction(hermiteH(n, x));
end
end
u = @(z, x, n, w, R, Psi) (2/pi)^(1/4)*sqrt(exp(1i*(2*n+1)*Psi)/(2^n*factorial(n)*w))*...
hermites{n + 1}(sqrt(2)*x/w).*exp(-x.^2*(1/w^2+1i*k/(2*R))-1i*k*z);
% Complex amplitude coefficients function
a = @(n) exp(1i*k*zi)*integral(@(x) Ei.*conj(u(zi, x, n, wi, Ri, Psii)),-2*wi,2*wi);
%----------------------------------------------------------------------------
xlisti = -0.1:1/10000:0.1; % initial x-axis range
xlistf = -0.1:1/10000:0.1; % final x-axis range
function Eiplot
Efieldi = zeros(size(xlisti));
for nr = nlist
Efieldi = Efieldi + a(nr).*u(zi, xlisti, nr, wi, Ri, Psii)*exp(-1i*k*zi);
end
Ii = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldi);
end
function Efplot
Efieldf = zeros(size(xlistf));
for nr = nlist
Efieldf = Efieldf + a(nr).*u(zf, xlistf, nr, wf, Rf, Psif)*exp(-1i*k*zf);
end
If = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldf);
end
Eiplot
Efplot
plot(xlisti,real(Ii),xlistf,real(If))
xlabel('x(m)') % x-axis label
ylabel('I(W/m^2)') % y-axis label
end
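If it helps, matlabFunction is what converts the symbolic hermiteH(n, x) expression into an ordinary numeric function handle, so the symbolic work is done once per mode rather than once per evaluation. A tiny standalone illustration (requires the Symbolic Math Toolbox, as does the original code):
% Illustration: build the Hermite polynomial handle once, evaluate it cheaply many times
x  = sym('x');
h2 = matlabFunction(hermiteH(2, x));   % h2 is effectively @(x) 4*x.^2 - 2
h2(0.5)                                % purely numeric evaluation, returns -1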
Here is a MATLAB program for the backpropagation algorithm:
% XOR input for x1 and x2
input = [0 0; 0 1; 1 0; 1 1];
% Desired output of XOR
output = [0;1;1;0];
% Initialize the bias
bias = [-1 -1 -1];
% Learning coefficient
coeff = 0.7;
% Number of learning iterations
iterations = 10000;
% Calculate weights randomly using seed.
rand('state',sum(100.*clock));
weights = -1 +2.*rand(3,3);
for i = 1:iterations
out = zeros(4,1);
numIn = length (input(:,1));
for j = 1:numIn
% Hidden layer
H1 = bias(1,1).*weights(1,1) + input(j,1).*weights(1,2)+ input(j,2).*weights(1,3);
% Send data through sigmoid function 1/1+e^-x
% Note that sigma is a different m file
% that I created to run this operation
x2(1) = sigma(H1);
H2 = bias(1,2).*weights(2,1)+ input(j,1).*weights(2,2)+ input(j,2).*weights(2,3);
x2(2) = sigma(H2);
% Output layer
x3_1 = bias(1,3).*weights(3,1)+ x2(1).*weights(3,2)+ x2(2).*weights(3,3);
out(j) = sigma(x3_1);
% Adjust delta values of weights
% For output layer:
% delta(wi) = xi*delta,
% delta = (1-actual output)*(desired output - actual output)
delta3_1 = out(j).*(1-out(j)).*(output(j)-out(j));
% Propagate the delta backwards into hidden layers
delta2_1 = x2(1).*(1-x2(1)).*weights(3,2).*delta3_1;
delta2_2 = x2(2).*(1-x2(2)).*weights(3,3).*delta3_1;
% Add weight changes to original weights
% And use the new weights to repeat process.
% delta weight = coeff*x*delta
for k = 1:3
if k == 1 % Bias cases
weights(1,k) = weights(1,k) + coeff.*bias(1,1).*delta2_1;
weights(2,k) = weights(2,k) + coeff.*bias(1,2).*delta2_2;
weights(3,k) = weights(3,k) + coeff.*bias(1,3).*delta3_1;
else % When k=2 or 3 input cases to neurons
weights(1,k) = weights(1,k) + coeff.*input(j,1).*delta2_1;
weights(2,k) = weights(2,k) + coeff.*input(j,2).*delta2_2;
weights(3,k) = weights(3,k) + coeff.*x2(k-1).*delta3_1;
end
end
end
end
But it is showing an error like this:
??? Index exceeds matrix dimensions.
Error in ==> sigma at 95
a=varargin{1}; b=varargin{2}; c=varargin{3}; d=varargin{4};
Error in ==> back at 25
x2(1) = sigma(H1);
Please help me out; I am not able to understand the problem. Why is there an error saying the index exceeds matrix dimensions?
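Judging by the error message, the sigma function being picked up on the path expects four inputs (it reads varargin{1} through varargin{4}), while it is called here with a single value, which is what triggers "Index exceeds matrix dimensions". A minimal one-argument sigma consistent with the comment in the code above (the logistic function 1/(1+e^-x)) would be, as a sketch:
function y = sigma(x)
% Logistic sigmoid, 1/(1 + exp(-x)), applied element-wise
y = 1 ./ (1 + exp(-x));
end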
If I am given a training data set and an unlabeled data set, what is the algorithm for computing the RBF kernel matrix in MATLAB?
This should be what you are looking for. It is taken from here
% With Fast Computation of the RBF kernel matrix
% To speed up the computation, we exploit a decomposition of the Euclidean distance (norm)
%
% Inputs:
% ker: 'lin','poly','rbf','sam'
% X: data matrix with training samples in rows and features in columns
% X2: data matrix with test samples in rows and features in columns
% sigma: width of the RBF kernel
% b: bias in the linear and polynomial kernel
% d: degree in the polynomial kernel
%
% Output:
% K: kernel matrix
%
% Gustavo Camps-Valls
% 2006(c)
% Jordi (jordi@uv.es), 2007
% 2007-11: if/then -> switch, and fixed RBF kernel
function K = kernelmatrix(ker,X,X2,sigma)
switch ker
case 'lin'
if exist('X2','var')
K = X' * X2;
else
K = X' * X;
end
case 'poly'
if exist('X2','var')
K = (X' * X2 + b).^d;
else
K = (X' * X + b).^d;
end
case 'rbf'
n1sq = sum(X.^2,1);
n1 = size(X,2);
if isempty(X2);
D = (ones(n1,1)*n1sq)' + ones(n1,1)*n1sq -2*X'*X;
else
n2sq = sum(X2.^2,1);
n2 = size(X2,2);
D = (ones(n2,1)*n1sq)' + ones(n1,1)*n2sq -2*X'*X2;
end;
K = exp(-D/(2*sigma^2));
case 'sam'
if exist('X2','var');
D = X'*X2;
else
D = X'*X;
end
K = exp(-acos(D).^2/(2*sigma^2));
otherwise
error(['Unsupported kernel ' ker])
end
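A couple of things to watch out for (my observations, not part of the original comments): despite the header saying samples are in rows, the implementation indexes samples along the columns (size(X,2), sum(X.^2,1), X'*X2), so the matrices should be passed as features-by-samples; also, the 'poly' branch references b and d, which are not inputs to this version of the function, so only the 'lin', 'rbf' and 'sam' cases will run as posted. A hedged usage example for the RBF case:
% Usage sketch: RBF kernel between 100 training and 20 test samples,
% each with 5 features, passed as features-by-samples matrices
Xtrain = rand(5, 100);
Xtest  = rand(5, 20);
sigma  = 1.0;
K = kernelmatrix('rbf', Xtrain, Xtest, sigma);   % 100-by-20 kernel matrix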
To produce the Gram/kernel matrix (the matrix of inner products), do:
function [ Kern ] = produce_kernel_matrix( X, t, beta )
%
X = X';
t = t';
X_T_2 = sum(X.^2,2) + sum(t.^2,2).' - (2*X)*t.'; % ||x||^2 + ||t||^2 - 2<x,t>
Kern =exp(-beta*X_T_2); %
end
then to do the interpolation do:
function [ mdl ] = learn_RBF_linear_algebra( X_training_data, Y_training_data, mdl )
%
Kern_matrix = produce_kernel_matrix(X_training_data, mdl.t, mdl.beta); % (N x K)
C = Kern_matrix \ Y_training_data'; % (K x D) = (N x K)' x (N x D)
mdl.c = C; % (K x D)
end
Note that beta here plays the role of 1/(2*sigma^2) in the RBF kernel above.
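To complete the picture, here is a hedged usage sketch (the field names mdl.t, mdl.beta and mdl.c follow the snippets above; everything else, including the data, is made up purely for illustration). After learning, predictions for new points are obtained by building the kernel between the new points and the centres t and multiplying by the learned coefficients:
% Usage sketch with made-up data: 3 features, 200 training points, 20 RBF centres
X = rand(3, 200);                          % training inputs, features x samples
Y = sum(X, 1);                             % some target values, 1 x N
mdl.t    = X(:, randperm(200, 20));        % centres: a random subset of the training data
mdl.beta = 1;                              % kernel width parameter
mdl      = learn_RBF_linear_algebra(X, Y, mdl);
% Predict on new inputs: kernel between new points and the centres, times c
Xnew  = rand(3, 50);
Kn    = produce_kernel_matrix(Xnew, mdl.t, mdl.beta);   % (50 x 20)
Ypred = Kn * mdl.c;                                     % (50 x 1)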