I want to implement parallel Clustering Particle Swarm Optimisation (CPSO) in MATLAB using a parfor loop, but it takes more elapsed time than the version with plain for loops. Please help me run the loop iterations in my code in parallel.
I have MATLAB R2013a and an Intel Core i3 processor.
matlabpool('open', 2)
tic;
for iteration=1:iterations
%CALCULATE EUCLIDEAN DISTANCES TO ALL CENTROIDS
distances=zeros(dataset_size(1),centroids,particles);
for particle=1:particles
for centroid=1:centroids
distance=zeros(dataset_size(1),1);
for data_vector=1:dataset_size(1)
%meas(data_vector,:)
distance(data_vector,1)=norm(swarm_pos(centroid,:,particle)-meas(data_vector,:));
end
distances(:,centroid,particle)=distance;
end
end
%fprintf('\nEnd, meas(dv) is %5.4f\n',meas(data_vector,:));
%ASSIGN MEASURES with CLUSTERS
for particle=1:particles
[value, index] = min(distances(:,:,particle),[],2);
c(:,particle) = index;
end
% PLOT STUFF... CLEAR PLOT HANDLES
delete(pc); delete(txt);
pc = []; txt = [];
% PLOT STUFF...
hold on;
for particle=1:particles
for centroid=1:centroids
if any(c(:,particle) == centroid)
if dimensions == 3
pc = [pc plot3(swarm_pos(centroid,1,particle),swarm_pos(centroid,2,particle),swarm_pos(centroid,3,particle),'*','color',cluster_colors_vector(particle,:))];
elseif dimensions == 2
pc = [pc plot(swarm_pos(centroid,1,particle),swarm_pos(centroid,2,particle),'*','color',cluster_colors_vector(particle,:))];
end
end
end
end
set(pc,{'MarkerSize'},{12})
hold off;
%CALCULATE GLOBAL FITNESS and LOCAL FITNESS:=swarm_fitness
average_fitness = zeros(particles,1);
for particle=1:particles
for centroid = 1 : centroids
if any(c(:,particle) == centroid)
local_fitness=mean(distances(c(:,particle)==centroid,centroid,particle));
average_fitness(particle,1) = average_fitness(particle,1) + local_fitness;
end
end
average_fitness(particle,1) = average_fitness(particle,1) / centroids;
if (average_fitness(particle,1) < swarm_fitness(particle))
swarm_fitness(particle) = average_fitness(particle,1);
swarm_best(:,:,particle) = swarm_pos(:,:,particle); %LOCAL BEST FITNESS
end
end
[global_fitness, index] = min(swarm_fitness); %GLOBAL BEST FITNESS
swarm_overall_pose = swarm_pos(:,:,index); %GLOBAL BEST POSITION
fprintf('global position is %5.4f\n',swarm_overall_pose);
% SOME INFO ON THE COMMAND WINDOW
fprintf('%3d. global fitness is %5.4f\n',iteration,global_fitness);
%uicontrol('Style','text','Position',[40 20 180 20],'String',sprintf('Actual fitness is: %5.4f', global_fitness),'BackgroundColor',get(gcf,'Color'));
pause(simtime);
% SAMPLE r1 AND r2 FROM UNIFORM DISTRIBUTION [0..1]
r1 = rand;
r2 = rand;
% UPDATE CLUSTER CENTROIDS
%p = Par(50);
for particle=1:particles
inertia = w * swarm_vel(:,:,particle);
cognitive = c1 * r1 * (swarm_best(:,:,particle)-swarm_pos(:,:,particle));
social = c2 * r2 * (swarm_overall_pose-swarm_pos(:,:,particle));
vel = inertia+cognitive+social;
swarm_pos(:,:,particle) = swarm_pos(:,:,particle) + vel ; % UPDATE PARTICLE POSE
swarm_vel(:,:,particle) = vel; % UPDATE PARTICLE VEL
end
%stop(p);
%plot(p);
% telapsed = toc(tstart);
% minTime = min(telapsed,minTime);
end
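A likely reason the parfor version runs slower here is that the parallel overhead (opening the pool, copying swarm_pos and meas to the workers) outweighs the small amount of work inside each inner loop, especially with only two workers. One direction worth trying, shown in the sketch below, is to keep the outer iteration loop serial and parallelize only over particles, replacing the innermost per-data-vector loop with a single vectorized distance computation. This is not part of the original code; it assumes the same variables (meas, swarm_pos, dataset_size, particles, centroids) are in scope and uses pdist2 from the Statistics Toolbox:
% Sketch only: vectorized distance computation, parallelized over particles.
% distances(:,:,particle) is a "sliced" output variable, so parfor accepts it.
distances = zeros(dataset_size(1), centroids, particles);
parfor particle = 1:particles
    % pdist2 returns an N-by-centroids matrix of Euclidean distances
    distances(:, :, particle) = pdist2(meas, swarm_pos(:, :, particle));
end
Even without parfor, the pdist2 call alone removes the per-data-vector loop, so it is worth timing the vectorized serial version first; for small datasets it may already be faster than any parallel variant.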
Does this code have mutation, selection, and crossover, just like the original genetic algorithm?
Since this is a hybrid algorithm (i.e. PSO with GA), does it use all the steps of the original GA or skip some
of them? Please do tell me.
I am just new to this and still trying to understand. Thank you.
%%% Hybrid GA and PSO code
function [gbest, gBestScore, all_scores] = QAP_PSO_GA(CreatePopFcn, FitnessFcn, UpdatePosition, ...
nCity, nPlant, nPopSize, nIters)
% Set algorithm parameters
constant = 0.95;
c1 = 1.5; %1.4944; %2;
c2 = 1.5; %1.4944; %2;
w = 0.792 * constant;
% Allocate memory and initialize
gBestScore = inf;
all_scores = inf * ones(nPopSize, nIters);
x = CreatePopFcn(nPopSize, nCity);
v = zeros(nPopSize, nCity);
pbest = x;
% update lbest
cost_p = inf * ones(1, nPopSize); %feval(FUN, pbest');
for i=1:nPopSize
cost_p(i) = FitnessFcn(pbest(i, 1:nPlant));
end
lbest = update_lbest(cost_p, pbest, nPopSize);
for iter = 1 : nIters
if mod(iter,1000) == 0
parents = randperm(nPopSize);
for i = 1:nPopSize
x(i,:) = (pbest(i,:) + pbest(parents(i),:))/2;
% v(i,:) = pbest(parents(i),:) - x(i,:);
% v(i,:) = (v(i,:) + v(parents(i),:))/2;
end
else
% Update velocity
v = w*v + c1*rand(nPopSize,nCity).*(pbest-x) + c2*rand(nPopSize,nCity).*(lbest-x);
% Update position
x = x + v;
x = UpdatePosition(x);
end
% Update pbest
cost_x = inf * ones(1, nPopSize);
for i=1:nPopSize
cost_x(i) = FitnessFcn(x(i, 1:nPlant));
end
s = cost_x<cost_p;
cost_p = (1-s).*cost_p + s.*cost_x;
s = repmat(s',1,nCity);
pbest = (1-s).*pbest + s.*x;
% update lbest
lbest = update_lbest(cost_p, pbest, nPopSize);
% update global best
all_scores(:, iter) = cost_x;
[cost,index] = min(cost_p);
if (cost < gBestScore)
gbest = pbest(index, :);
gBestScore = cost;
end
% draw current fitness
figure(1);
plot(iter,min(cost_x),'cp','MarkerEdgeColor','k','MarkerFaceColor','g','MarkerSize',8)
hold on
str=strcat('Best fitness: ', num2str(min(cost_x)));
disp(str);
end
end
% Function to update lbest
function lbest = update_lbest(cost_p, x, nPopSize)
sm(1, 1)= cost_p(1, nPopSize);
sm(1, 2:3)= cost_p(1, 1:2);
[cost, index] = min(sm);
if index==1
lbest(1, :) = x(nPopSize, :);
else
lbest(1, :) = x(index-1, :);
end
for i = 2:nPopSize-1
sm(1, 1:3)= cost_p(1, i-1:i+1);
[cost, index] = min(sm);
lbest(i, :) = x(i+index-2, :);
end
sm(1, 1:2)= cost_p(1, nPopSize-1:nPopSize);
sm(1, 3)= cost_p(1, 1);
[cost, index] = min(sm);
if index==3
lbest(nPopSize, :) = x(1, :);
else
lbest(nPopSize, :) = x(nPopSize-2+index, :);
end
end
If you are new to optimization, I recommend that you first study each algorithm separately; then you may study how GA and PSO can be combined. You must also have basic mathematical skills in order to understand the operators of the two algorithms and to test the efficiency of these algorithms (which is what really matters).
This code chunk is responsible for parent selection and crossover:
parents = randperm(nPopSize);
for i = 1:nPopSize
x(i,:) = (pbest(i,:) + pbest(parents(i),:))/2;
% v(i,:) = pbest(parents(i),:) - x(i,:);
% v(i,:) = (v(i,:) + v(parents(i),:))/2;
end
It is not really obvious how the selection with randperm is done (I have no experience with MATLAB).
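For reference, randperm(nPopSize) simply returns a random permutation of the integers 1..nPopSize, so each particle i is paired with a randomly chosen partner parents(i), and the "child" is the arithmetic mean of the two personal bests. A tiny standalone illustration with toy values (not part of the original code):
% Toy illustration of the randperm-based pairing and averaging crossover
nPopSize = 4;
pbest = [1 1; 2 2; 3 3; 4 4];             % toy personal-best positions
parents = randperm(nPopSize);             % e.g. [3 1 4 2], a random permutation
child = (pbest + pbest(parents, :)) / 2;  % each row averaged with its random partner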
And this is the code that is responsible for updating the velocity and position of each particle:
% Update velocity
v = w*v + c1*rand(nPopSize,nCity).*(pbest-x) + c2*rand(nPopSize,nCity).*(lbest-x);
% Update position
x = x + v;
x = UpdatePosition(x);
This version of the velocity-update strategy uses what is called the inertia weight w, which basically means we preserve part of each particle's velocity history rather than recomputing it from scratch.
It is worth mentioning that the velocity update is performed far more often than crossover, which only happens every 1000 iterations.
This source code is an implementation of the epsilon-constraint method. How can I replace the fmincon() function with a PSO or GA optimization algorithm? (I do not want to use a built-in function.)
This is the code for the main function:
x0 = [1 1]; % Starting point
UB = [1 1]; % Upper bound
LB = [0 0]; % Lower bound
options = optimset('LargeScale', 'off', 'MaxFunEvals', 1000, ...
'TolFun', 1e-6, 'TolCon', 1e-6, 'disp', 'off');
% Create constraint bound vector:
n = 50; % Number of Pareto points
eps_min = -1;
eps_max = 0;
eps = eps_min:(eps_max - eps_min)/(n-1):eps_max;
% Solve scalarized problem for each epsilon value:
xopt = zeros(n,length(x0));
for i=1:n
xopt(i,:)=fmincon('obj_eps', x0, [], [], [], [], LB, UB,...
'nonlcon_eps', options, eps(i));
end
This is the constraints function:
function [C,constraintViolation] = nonlcon_eps(x, eps)
constraintViolation= 0;
Ceq = [];
C(1) =x(2)+(x(1)-1)^3;
if C(1) > 0
constraintViolation= constraintViolation+ 1;
end
C(2) = -x(1) - eps;
if C(2) > 0
constraintViolation= constraintViolation+ 1;
end
This is the objective function:
function f = obj_eps(x, ~)
f = 2*x(1)-x(2);
I have replaced this part:
for i=1:n
xopt(i,:)=fmincon('obj_eps', x0, [], [], [], [], LB, UB,'nonlcon_eps', options, eps(i));
end
with this:
maxIteration = 1000;
dim = 2;
n = 50; % Number of Pareto points
eps_min = -1;
eps_max = 0;
EpsVal = eps_min:(eps_max - eps_min)/(n-1):eps_max;
for i=1:n
[gbest]= PSOalgo(N,T,lb,ub,dim,fobj,fcon,EpsVal(i));
end
function [gbest]= PSOalgo(N,maxite,lb,ub,dim,fobj,fcon,EpsVal)
% initialization
wmax=0.9; % inertia weight
wmin=0.4; % inertia weight
c1=2; % acceleration factor
c2=2; % acceleration factor
% pso initialization
X=initialization(N,dim,ub,lb);
v = 0.1*X; % initial velocity
for i=1:N
fitnessX(i,1)= fobj(X(i,:));
end
[fmin0,index0]= min(fitnessX);
pbest= X; % initial pbest
pbestfitness = fitnessX;
gbest= X(index0,:); % initial gbest
gbestfitness = fmin0;
ite=0; % Loop counter
while ite<maxite
w=wmax-(wmax-wmin)*ite/maxite; % update inertial weight
% pso velocity updates
for i=1:N
for j=1:dim
v(i,j)=w*v(i,j)+c1*rand()*(pbest(i,j)- X(i,j)) + c2*rand()*(gbest(1,j)- X(i,j));
end
end
% pso position update
for i=1:N
for j=1:dim
X(i,j)= X(i,j)+v(i,j);
end
% Check boundries
FU=X(i,:)>ub;
FL=X(i,:)<lb;
X(i,:)=(X(i,:).*(~(FU+FL)))+ub.*FU+lb.*FL;
% evaluating fitness
fitnessX(i,1) = fobj(X(i,:));
[~,constraintViolation(i,1)] = fcon(X(i,:), EpsVal);
end
% updating pbest and fitness
for i=1:N
if fitnessX(i,1) < pbestfitness(i,1) && constraintViolation(i,1) == 0
pbest(i,:)= X(i,:);
pbestfitness(i,1)= fitnessX(i,1);
end
[~,constraintViolation(i,1)] = fcon(pbest(i,:), EpsVal);
end
% updating gbest and best fitness
for i=1:N
if pbestfitness(i,1)<gbestfitness && constraintViolation(i,1) == 0
gbest=pbest(i,:);
gbestfitness= pbestfitness(i,1);
end
end
ite = ite+1;
end
end
However, when I run the code the output is only one solution, while it should be 50 (n = 50). This is because all the other solutions do not satisfy the constraints. How can I modify the code to get one solution at each run (n = 1:50)?
UPDATE:
I have included the PSOalgo() code and made some modifications. The output is now 50 solutions. However, the obtained result is not correct.
fmincon() result:
PSO result:
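One thing worth checking in the driver loop posted above: as written, gbest is overwritten on every pass of the epsilon loop, so only the last epsilon value's solution is ever kept. A minimal sketch that stores one solution per epsilon value, analogous to the original xopt array, assuming PSOalgo returns a 1-by-dim row vector and reusing the variable names from the posted call:
% Sketch: keep the best point found for each epsilon value
xopt = zeros(n, dim);
for i = 1:n
    xopt(i, :) = PSOalgo(N, maxIteration, lb, ub, dim, fobj, fcon, EpsVal(i));
end
Whether each stored point then matches the fmincon front depends on the constraint handling inside PSOalgo.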
I'm writing a "peak finder" in MATLAB. I've never used MATLAB or anything similar before this project, so I'm new to "vectorizing" my code. Essentially, the program needs to take a video of molecules and plot circles on the molecules present in each frame of the video. If a molecule is crowded then it gets a red circle, but if it is not crowded it gets a green circle.
My problem is that some of these videos have 2000 frames and my program takes up to ~25 seconds to process a single frame, which is not practical.
Using tic and toc I've found the troublemaker: a for loop that calls a function containing another for loop.
function getPeaks( orgnl_img, processed_img, cut_off, radius )
% find large peaks by thresholding, i.e. accept a peak only
% if it is more than 'threshold' higher than its neighbors
threshold = 2*std2(orgnl_img);
peaks = (orgnl_img - processed_img) > threshold;
% m and n are dimensions of the frame, named peaks
[m, n] = size(peaks);
cc_centroids = regionprops(peaks, 'centroid');
% Pre-allocate arrays
x_centroid = zeros(1, length(cc_centroids));
y_centroid = zeros(1, length(cc_centroids));
for i = 1:length(cc_centroids)
% Extract the x and y components from cc_centroids struct
x_centroid(i) = cc_centroids(i).Centroid(1);
y_centroid(i) = cc_centroids(i).Centroid(2);
row = int64(x_centroid(i));
col = int64(y_centroid(i));
% Ensure the script doesn't try to index outside the frame
if col-2 > 0 && row-2 > 0 && col+2 < m && row+2 < n
region_of_interest = orgnl_img(col-2:col+2,row-2:row+2);
local_intensity = max(region_of_interest(:));
% Do not plot circle when intensity is 'cut off' or lower
if local_intensity > cut_off
dist_bool = findDistance(cc_centroids, x_centroid(i), y_centroid(i), radius);
if dist_bool == 1
color = 'g';
else
color = 'r';
end
plotCircle(color, x_centroid(i), y_centroid(i), radius)
end
end
end
end
And here is the findDistance function which contains another for-loop and determines if the circles overlap:
function dist_bool = findDistance( all_centroids, x_crrnt, y_crrnt, radius )
x_nearby = zeros(1, length(all_centroids));
y_nearby = zeros(1, length(all_centroids));
for i = 1:length(all_centroids)
if all_centroids(i).Centroid(1) < (x_crrnt+2*radius) &&...
all_centroids(i).Centroid(1) > (x_crrnt-2*radius) &&...
all_centroids(i).Centroid(2) < (y_crrnt+2*radius) &&...
all_centroids(i).Centroid(2) > (y_crrnt-2*radius)
x_nearby(i) = all_centroids(i).Centroid(1);
y_nearby(i) = all_centroids(i).Centroid(2);
pts_of_interest = [x_nearby(i),y_nearby(i);x_crrnt,y_crrnt];
dist = pdist(pts_of_interest);
if dist == 0 || dist > 2*radius
dist_bool = 1;
else
dist_bool = 0;
break
end
end
end
end
I think that there must be much improvement to be done here. I would appreciate any advice.
Thanks! :)
UPDATE: Here are the profiler results. The first section of code is the "getPeaks" function
http://i.stack.imgur.com/VaLdH.png
The time spent in hold comes from the plotCircle function:
function plotCircle( color, x_centroid, y_centroid, radius )
hold on;
th = 0:pi/50:2*pi;
xunit = radius * cos(th) + x_centroid;
yunit = radius * sin(th) + y_centroid;
plot(xunit, yunit, color);
hold off;
end
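A direction that usually helps a lot with this kind of per-centroid loop is to compute every pairwise centroid distance in one call and classify all molecules at once, instead of calling pdist inside a nested loop. The sketch below is not a drop-in replacement; it assumes cc_centroids comes from regionprops as in the question and that "crowded" means another centroid lies within 2*radius:
% Sketch: classify every centroid as crowded or uncrowded in one shot
centroids = vertcat(cc_centroids.Centroid);   % N-by-2 matrix of [x y]
D = pdist2(centroids, centroids);             % all pairwise Euclidean distances
D(1:size(D,1)+1:end) = inf;                   % ignore each centroid's distance to itself
isCrowded = any(D < 2*radius, 2);             % red circle if any neighbour is too close
Plotting can be batched the same way: build the circle coordinates for all green molecules and draw them with a single plot call (and likewise for red), since one plot call per molecule is typically far more expensive than the distance math.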
Could someone please run this for me and tell me how long it takes for you? It took my laptop 60s. I can't tell if it's my laptop that's crappy or my code. Probably both.
I just started learning MATLAB, so I'm not yet familiar with which functions are better suited to specific tasks. If you have any suggestions on how I could improve this code, they would be greatly appreciated.
function gbp
clear; clc;
zi = 0; % initial position
zf = 100; % final position
Ei = 1; % initial electric field
c = 3*10^8; % speed of light
epsilon = 8.86*10^-12; % permittivity of free space
lambda = 1064*10^-9; % wavelength
k = 2*pi/lambda; % wave number
wi = 1.78*10^-3; % initial waist width (minimum spot size)
zr = (pi*wi^2)/lambda; % Rayleigh range
Ri = zi + zr^2/zi; % initial radius of curvature
qi = 1/(1/Ri-1i*lambda/(pi*wi^2)); % initial complex beam parameter
Psii = atan(real(qi)/imag(qi)); % Gouy phase
mat = [1 zf; 0 1]; % transformation matrix
A = mat(1,1); B = mat(1,2); C = mat(2,1); D = mat(2,2);
qf = (A*qi + B)/(C*qi + D); % final complex beam parameter
wf = sqrt(-lambda/pi*(1/imag(1/qf))); % final spot size
Rf = 1/real(1/qf); % final radius of curvature
Psif = atan(real(qf)/imag(qf)); % final Gouy phase
% Hermite - Gaussian modes function
u = @(z, x, n, w, R, Psi) (2/pi)^(1/4)*sqrt(exp(1i*(2*n+1)*Psi)/(2^n*factorial(n)*w))*...
hermiteH(n,sqrt(2)*x/w).*exp(-x.^2*(1/w^2+1i*k/(2*R))-1i*k*z);
% Complex amplitude coefficients function
a = @(n) exp(1i*k*zi)*integral(@(x) Ei.*conj(u(zi, x, n, wi, Ri, Psii)),-2*wi,2*wi);
%----------------------------------------------------------------------------
xlisti = -0.1:1/10000:0.1; % initial x-axis range
xlistf = -0.1:1/10000:0.1; % final x-axis range
nlist = 0:2:20; % modes range
function Eiplot
Efieldi = zeros(size(xlisti));
for nr = nlist
Efieldi = Efieldi + a(nr).*u(zi, xlisti, nr, wi, Ri, Psii)*exp(-1i*k*zi);
end
Ii = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldi);
end
function Efplot
Efieldf = zeros(size(xlistf));
for nr = nlist
Efieldf = Efieldf + a(nr).*u(zf, xlistf, nr, wf, Rf, Psif)*exp(-1i*k*zf);
end
If = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldf);
end
Eiplot
Efplot
plot(xlisti,real(Ii),xlistf,real(If))
xlabel('x(m)') % x-axis label
ylabel('I(W/m^2)') % y-axis label
end
The cost comes from the calls to hermiteH: every call creates a new function from symbolic variables and then evaluates it at your input. The key to speeding this up is to pre-compute the Hermite polynomial functions and then evaluate those, rather than creating them from scratch each time (a speedup from ~26 seconds to around 0.75 s on my computer).
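As a quick standalone illustration of the idea (not part of the answer's code), matlabFunction turns the symbolic polynomial into an ordinary numeric function handle once, after which every evaluation is cheap:
syms x
h4 = matlabFunction(hermiteH(4, x));  % 16*x^4 - 48*x^2 + 12 as a numeric handle
h4(0.5)                               % returns 1, with no symbolic work per call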
With the changes:
function gbp
x = sym('x');
zi = 0; % initial position
zf = 100; % final position
Ei = 1; % initial electric field
c = 3*10^8; % speed of light
epsilon = 8.86*10^-12; % permittivity of free space
lambda = 1064*10^-9; % wavelength
k = 2*pi/lambda; % wave number
wi = 1.78*10^-3; % initial waist width (minimum spot size)
zr = (pi*wi^2)/lambda; % Rayleigh range
Ri = zi + zr^2/zi; % initial radius of curvature
qi = 1/(1/Ri-1i*lambda/(pi*wi^2)); % initial complex beam parameter
Psii = atan(real(qi)/imag(qi)); % Gouy phase
mat = [1 zf; 0 1]; % transformation matrix
A = mat(1,1); B = mat(1,2); C = mat(2,1); D = mat(2,2);
qf = (A*qi + B)/(C*qi + D); % final complex beam parameter
wf = sqrt(-lambda/pi*(1/imag(1/qf))); % final spot size
Rf = 1/real(1/qf); % final radius of curvature
Psif = atan(real(qf)/imag(qf)); % final Gouy phase
% Hermite - Gaussian modes function
nlist = 0:2:20; % modes range
% precompute hermite polynomials for nlist
hermites = {};
for n = nlist
if n == 0
hermites{n + 1} = @(x)1.0;
else
hermites{n + 1} = matlabFunction(hermiteH(n, x));
end
end
u = @(z, x, n, w, R, Psi) (2/pi)^(1/4)*sqrt(exp(1i*(2*n+1)*Psi)/(2^n*factorial(n)*w))*...
hermites{n + 1}(sqrt(2)*x/w).*exp(-x.^2*(1/w^2+1i*k/(2*R))-1i*k*z);
% Complex amplitude coefficients function
a = @(n) exp(1i*k*zi)*integral(@(x) Ei.*conj(u(zi, x, n, wi, Ri, Psii)),-2*wi,2*wi);
%----------------------------------------------------------------------------
xlisti = -0.1:1/10000:0.1; % initial x-axis range
xlistf = -0.1:1/10000:0.1; % final x-axis range
function Eiplot
Efieldi = zeros(size(xlisti));
for nr = nlist
Efieldi = Efieldi + a(nr).*u(zi, xlisti, nr, wi, Ri, Psii)*exp(-1i*k*zi);
end
Ii = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldi);
end
function Efplot
Efieldf = zeros(size(xlistf));
for nr = nlist
Efieldf = Efieldf + a(nr).*u(zf, xlistf, nr, wf, Rf, Psif)*exp(-1i*k*zf);
end
If = 1/2*c*epsilon*arrayfun(@(x)x.*conj(x),Efieldf);
end
Eiplot
Efplot
plot(xlisti,real(Ii),xlistf,real(If))
xlabel('x(m)') % x-axis label
ylabel('I(W/m^2)') % y-axis label
end
For a fixed and given tform, the imwarp command in the Image Processing Toolbox
B = imwarp(A,tform)
is linear with respect to A, meaning there exists some sparse matrix W, depending on tform but independent of A, such that the above can be equivalently implemented
B(:)=W*A(:)
for all A of fixed known dimensions [n,n]. My question is whether there are fast/efficient options for computing W. The matrix form is necessary when I need the transpose operation W.'*B(:), or if I need to do W\B(:) or similar linear algebraic things which I can't do directly through imwarp alone.
I know that it is possible to compute W column-by-column by doing
E=zeros(n);
W=spalloc(n^2,n^2,4*n^2);
for i=1:n^2
E(i)=1;
tmp=imwarp(E,tform);
E(i)=0;
W(:,i)=tmp(:);
end
but this is brute force and slow.
The routine FUNC2MAT is somewhat more efficient in that it uses the loop to compute and gather the sparse entry data I, J, S of each column W(:,i), and only after the loop does it construct the overall sparse matrix. It also offers the option of using a parfor loop. However, this is still slower than I would like.
Can anyone suggest more speed-optimal alternatives?
EDIT:
For those uncomfortable with my claim that imwarp(A,tform) is linear w.r.t. A, I include the demo script below, which tests that the superposition property is satisfied for random input images and tform data. It can be run repeatedly to see that linearityError is always small and easily attributable to floating-point noise.
tform=affine2d(rand(3,2));
%tform=projective2d(rand(3));
fun=@(A) imwarp(A,tform,'cubic');
I1=rand(100); I2=rand(100);
c1=rand; c2=rand;
LHS=fun(c1*I1+c2*I2); %left hand side
RHS=c1*fun(I1)+c2*fun(I2); %right hand side
linearityError = norm(LHS(:)-RHS(:),'inf')
That's actually pretty simple:
W = sparse(B(:)/A(:));
Note that W is not unique, but this operation probably produces the most sparse result. Another way to calculate it would be
W = sparse( B(:) * pinv(A(:)) );
but that results in a much less sparse (yet still valid) result.
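Another direction that avoids probing imwarp n^2 times (a sketch under simplifying assumptions, not a verified drop-in replacement): for an interpolation method with a small local stencil, W can be assembled directly from the inverse mapping, since each output pixel depends on only a handful of input pixels. The sketch below assumes bilinear interpolation, an n-by-n image, zero fill outside the image, and an output grid pinned to the input grid via imwarp(A, tform, 'linear', 'OutputView', imref2d([n n])); the helper sub2indSafe is hypothetical:
function W = warpMatrixLinear(tform, n)
    % Sketch: build the sparse warp matrix from the inverse map and bilinear weights
    [xOut, yOut] = meshgrid(1:n, 1:n);                % output pixel centres
    [xSrc, ySrc] = transformPointsInverse(tform, xOut(:), yOut(:));
    x0 = floor(xSrc);  y0 = floor(ySrc);              % top-left neighbour of each sample
    dx = xSrc - x0;    dy = ySrc - y0;                % fractional offsets in [0,1)
    rows = repmat((1:n^2)', 4, 1);
    cols = [sub2indSafe(y0,   x0,   n);
            sub2indSafe(y0,   x0+1, n);
            sub2indSafe(y0+1, x0,   n);
            sub2indSafe(y0+1, x0+1, n)];
    vals = [(1-dx).*(1-dy); dx.*(1-dy); (1-dx).*dy; dx.*dy];
    keep = cols > 0;                                  % drop out-of-bounds neighbours
    W = sparse(rows(keep), cols(keep), vals(keep), n^2, n^2);
end
function idx = sub2indSafe(r, c, n)
    % column-major linear index into an n-by-n image, 0 for out-of-bounds samples
    ok = r >= 1 & r <= n & c >= 1 & c <= n;
    idx = zeros(size(r));
    idx(ok) = (c(ok) - 1)*n + r(ok);
end
The 4*n^2 entries allocated by spalloc in the question match the four nonzeros per row of this bilinear case; cubic interpolation would follow the same pattern with a 4-by-4 stencil (about 16 nonzeros per row), and the result may still differ slightly from imwarp at the image borders.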
I constructed the warping matrix using the optical flow fields [u,v], and it is working well for my application:
% this function computes the warping matrix
% M x N is the size of the image
function [ Fw ] = generateFwi( u,v,M,N )
Fw = zeros(M*N, M*N);
k =1;
for i=1:M
for j= 1:N
newcoord(1) = i+u(i,j);
newcoord(2) = j+v(i,j);
newi = newcoord(1);
newj = newcoord(2);
if newi >0 && newj >0
newi1x = floor(newi);
newi1y = floor(newj);
newi2x = floor(newi);
newi2y = ceil(newj);
newi3x = ceil(newi); % four nearest points to the given point
newi3y = floor(newj);
newi4x = ceil(newi);
newi4y = ceil(newj);
x1 = [newi,newj;newi1x,newi1y];
x2 = [newi,newj;newi2x,newi2y];
x3 = [newi,newj;newi3x,newi3y];
x4 = [newi,newj;newi4x,newi4y];
w1 = pdist(x1,'euclidean');
w2 = pdist(x2,'euclidean');
w3 = pdist(x3,'euclidean');
w4 = pdist(x4,'euclidean');
if ceil(newi) == floor(newi) && ceil(newj)==floor(newj) % both the new coordinates are integers
Fw(k,(newi1x-1)*N+newi1y) = 1;
else if ceil(newi) == floor(newi) % one of the new coordinates is an integer
w = w1+w2;
w1new = w1/w;
w2new = w2/w;
W = w1new*w2new;
y1coord = (newi1x-1)*N+newi1y;
y2coord = (newi2x-1)*N+newi2y;
if y1coord <= M*N && y2coord <=M*N
Fw(k,y1coord) = W/w2new;
Fw(k,y2coord) = W/w1new;
end
else if ceil(newj) == floor(newj) % one of the new coordinates is an integer
w = w1+w3;
w1 = w1/w;
w3 = w3/w;
W = w1*w3;
y1coord = (newi1x-1)*N+newi1y;
y2coord = (newi3x-1)*N+newi3y;
if y1coord <= M*N && y2coord <=M*N
Fw(k,y1coord) = W/w3;
Fw(k,y2coord) = W/w1;
end
else % both the new coordinates are not integers
w = w1+w2+w3+w4;
w1 = w1/w;
w2 = w2/w;
w3 = w3/w;
w4 = w4/w;
W = w1*w2*w3 + w2*w3*w4 + w3*w4*w1 + w4*w1*w2;
y1coord = (newi1x-1)*N+newi1y;
y2coord = (newi2x-1)*N+newi2y;
y3coord = (newi3x-1)*N+newi3y;
y4coord = (newi4x-1)*N+newi4y;
if y1coord <= M*N && y2coord <= M*N && y3coord <= M*N && y4coord <= M*N
Fw(k,y1coord) = w2*w3*w4/W;
Fw(k,y2coord) = w3*w4*w1/W;
Fw(k,y3coord) = w4*w1*w2/W;
Fw(k,y4coord) = w1*w2*w3/W;
end
end
end
end
else
Fw(k,k) = 1;
end
k=k+1;
end
end
end
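If I am reading the indexing correctly, row k = (i-1)*N + j of Fw corresponds to a row-major flattening of the image, whereas MATLAB's img(:) is column-major, so applying Fw takes a transpose on each side. A small usage sketch (hypothetical variable names):
% Sketch: apply the flow-based warp matrix to an M-by-N image img
vec = reshape(img.', M*N, 1);         % row-major flattening to match Fw's indexing
warpedVec = Fw * double(vec);         % warp as a single matrix-vector product
warped = reshape(warpedVec, N, M).';  % back to an M-by-N image
For realistic image sizes, building Fw as a full M*N-by-M*N zeros matrix will exhaust memory quickly; collecting the (row, column, weight) triplets inside the loop and calling sparse once at the end is usually necessary.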