Run-length decoding in MATLAB - performance

For clever use of linear indexing or accumarray, I have sometimes felt the need to generate sequences based on run-length encoding. As there is no built-in function for this, I am asking for the most efficient way to decode a run-length-encoded sequence.
Specification:
To make this a fair comparison, I would like to set up some specifications for the function:
If an optional second argument values of the same length is specified, the output should be built from those values; otherwise just the values 1:length(runLengths) are used.
Gracefully handle:
zeros in runLengths
values being a cell array.
The output vector should have the same column/row orientation as runLengths.
In short: The function should be equivalent to the following code:
function V = runLengthDecode(runLengths, values)
[~,V] = histc(1:sum(runLengths), cumsum([1,runLengths(:).']));
if nargin>1
V = reshape(values(V), 1, []);
end
V = shiftdim(V, ~isrow(runLengths));
end
Examples:
Here are a few test cases
runLengthDecode([0,1,0,2])
runLengthDecode([0,1,0,4], [1,2,4,5].')
runLengthDecode([0,1,0,2].', [10,20,30,40])
runLengthDecode([0,3,1,0], {'a','b',1,2})
and their output:
>> runLengthDecode([0,1,0,2])
ans =
2 4 4
>> runLengthDecode([0,1,0,4], [1,2,4,5].')
ans =
2 5 5 5 5
>> runLengthDecode([0,1,0,2].', [10,20,30,40])
ans =
20
40
40
>> runLengthDecode([0,3,1,0],{'a','b',1,2})
ans =
'b' 'b' 'b' [1]

To find out which one is the most efficient solution, we provide a test script that evaluates the performance. The first plot depicts runtimes for growing length of the vector runLengths, where the entries are uniformly distributed with a maximum length of 200. A modification of gnovice's solution is the fastest, with Divakar's solution in second place.
The second plot uses nearly the same test data, except that it includes an initial run of length 2000. This mostly affects the two bsxfun solutions, whereas the other solutions perform quite similarly.
The tests suggest that a modification of gnovice's original answer is the most performant.
If you want to do the speed comparison yourself, here's the code used to generate the plots above.
function theLastRunLengthDecodingComputationComparisonYoullEverNeed()
Funcs = {@knedlsepp0, ...
@LuisMendo1bsxfun, ...
@LuisMendo2cumsum, ...
@gnovice3cumsum, ...
@Divakar4replicate_bsxfunmask, ...
@knedlsepp5cumsumaccumarray
};
%% Growing number of runs, low maximum sizes in runLengths
ns = 2.^(1:25);
paramGenerators{1} = arrayfun(@(n) @(){randi(200,n,1)}, ns,'uni',0);
paramGenerators{2} = arrayfun(@(n) @(){[2000;randi(200,n,1)]}, ns,'uni',0);
for i = 1:2
times = compareFunctions(Funcs, paramGenerators{i}, 0.5);
finishedComputations = any(~isnan(times),2);
h = figure('Visible', 'off');
loglog(ns(finishedComputations), times(finishedComputations,:));
legend(cellfun(@func2str,Funcs,'uni',0),'Location','NorthWest','Interpreter','none');
title('Runtime comparison for run length decoding - Growing number of runs');
xlabel('length(runLengths)'); ylabel('seconds');
print(['-f',num2str(h)],'-dpng','-r100',['RunLengthComparsion',num2str(i)]);
end
end
function times = compareFunctions(Funcs, paramGenerators, timeLimitInSeconds)
if nargin<3
timeLimitInSeconds = Inf;
end
times = zeros(numel(paramGenerators),numel(Funcs));
for i = 1:numel(paramGenerators)
Params = feval(paramGenerators{i});
for j = 1:numel(Funcs)
if max(times(:,j))<timeLimitInSeconds
times(i,j) = timeit(@()feval(Funcs{j},Params{:}));
else
times(i,j) = NaN;
end
end
end
end
%% // #################################
%% // HERE COME ALL THE FANCY FUNCTIONS
%% // #################################
function V = knedlsepp0(runLengths, values)
[~,V] = histc(1:sum(runLengths), cumsum([1,runLengths(:).']));%'
if nargin>1
V = reshape(values(V), 1, []);
end
V = shiftdim(V, ~isrow(runLengths));
end
%% // #################################
function V = LuisMendo1bsxfun(runLengths, values)
nn = 1:numel(runLengths);
if nargin==1 %// handle one-input case
values = nn;
end
V = values(nonzeros(bsxfun(@times, nn,...
bsxfun(@le, (1:max(runLengths)).', runLengths(:).'))));
if size(runLengths,1)~=size(values,1) %// adjust orientation of output vector
V = V.'; %'
end
end
%% // #################################
function V = LuisMendo2cumsum(runLengths, values)
if nargin==1 %// handle one-input case
values = 1:numel(runLengths);
end
[ii, ~, jj] = find(runLengths(:));
V(cumsum(jj(end:-1:1))) = 1;
V = values(ii(cumsum(V(end:-1:1))));
if size(runLengths,1)~=size(values,1) %// adjust orientation of output vector
V = V.'; %'
end
end
%% // #################################
function V = gnovice3cumsum(runLengths, values)
isColumnVector = size(runLengths,1)>1;
if nargin==1 %// handle one-input case
values = 1:numel(runLengths);
end
values = reshape(values(runLengths~=0),1,[]);
if isempty(values) %// If there are no runs
V = []; return;
end
runLengths = nonzeros(runLengths(:));
index = zeros(1,sum(runLengths));
index(cumsum([1;runLengths(1:end-1)])) = 1;
V = values(cumsum(index));
if isColumnVector %// adjust orientation of output vector
V = V.'; %'
end
end
%% // #################################
function V = Divakar4replicate_bsxfunmask(runLengths, values)
if nargin==1 %// Handle one-input case
values = 1:numel(runLengths);
end
%// Do size checking to make sure that both values and runlengths are row vectors.
if size(values,1) > 1
values = values.'; %//'
end
if size(runLengths,1) > 1
yes_transpose_output = false;
runLengths = runLengths.'; %//'
else
yes_transpose_output = true;
end
maxlen = max(runLengths);
all_values = repmat(values,maxlen,1);
%// OR all_values = values(ones(1,maxlen),:);
V = all_values(bsxfun(@le,(1:maxlen)',runLengths)); %//'
%// Bring the shape of V back to the shape of runlengths
if yes_transpose_output
V = V.'; %//'
end
end
%% // #################################
function V = knedlsepp5cumsumaccumarray(runLengths, values)
isRowVector = size(runLengths,2)>1;
%// Actual computation using column vectors
V = cumsum(accumarray(cumsum([1; runLengths(:)]), 1));
V = V(1:end-1);
%// In case of second argument
if nargin>1
V = reshape(values(V),[],1);
end
%// If original was a row vector, transpose
if isRowVector
V = V.'; %'
end
end

Approach 1
This should be reasonably fast. It uses bsxfun to create a matrix of size max(runLengths) x numel(runLengths), so it may not be suitable for huge input sizes.
function V = runLengthDecode(runLengths, values)
nn = 1:numel(runLengths);
if nargin==1 %// handle one-input case
values = nn;
end
V = values(nonzeros(bsxfun(@times, nn,...
bsxfun(@le, (1:max(runLengths)).', runLengths(:).'))));
if size(runLengths,1)~=size(values,1) %// adjust orientation of output vector
V = V.';
end
Approach 2
This approach, based on cumsum, is an adaptation of that used in this other answer. It uses less memory than approach 1.
function V = runLengthDecode2(runLengths, values)
if nargin==1 %// handle one-input case
values = 1:numel(runLengths);
end
[ii, ~, jj] = find(runLengths(:));
V(cumsum(jj(end:-1:1))) = 1;
V = values(ii(cumsum(V(end:-1:1))));
if size(runLengths,1)~=size(values,1) %// adjust orientation of output vector
V = V.';
end
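To see how this cumsum trick works, here is a short worked trace (my own illustration, not part of the original answer) of runLengthDecode2 for the first test case, runLengths = [0,1,0,2] with the default values = 1:4:
[ii, ~, jj] = find([0,1,0,2].'); % ii = [2;4] (positions of the nonzero runs), jj = [1;2] (their lengths)
cumsum(jj(end:-1:1))             % [2;3]: positions (in the reversed output) where the run index steps up
% V([2;3]) = 1              ->  V = [0 1 1]
% cumsum(V(end:-1:1))       ->  [1 2 2], used as indices into ii
% ii([1 2 2]) = [2 4 4]     ->  values([2 4 4]) = 2 4 4, the expected output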

As of R2015a, the function repelem is the best choice to do this:
function V = runLengthDecode(runLengths, values)
if nargin<2
values = 1:numel(runLengths);
end
V = repelem(values, runLengths);
end
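For instance, with R2015a or newer, the first test case from above gives:
runLengthDecode([0,1,0,2])     % ans = 2 4 4
repelem(10:10:40, [0,1,0,2])   % ans = 20 40 40, calling repelem directly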
For versions before R2015a, here is a fast alternative.
Alternative to repelem:
I had the feeling gnovice's approach could be improved upon by using a better fix for zero run lengths than the preprocessing mask, so I gave accumarray a shot. It seems this gives the solution yet another boost:
function V = runLengthDecode(runLengths, values)
%// Actual computation using column vectors
V = cumsum(accumarray(cumsum([1; runLengths(:)]), 1));
V = V(1:end-1);
%// In case of second argument
if nargin>1
V = reshape(values(V),[],1);
end
%// If original was a row vector, transpose
if size(runLengths,2)>1
V = V.'; %'
end
end
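To see why this copes with zero run lengths, here is a short trace (my own illustration) using the last test case from the question:
runLengths = [0,3,1,0];
s = cumsum([1; runLengths(:)]);   % [1;1;4;5;5]: start position of each run, plus a final sentinel
a = accumarray(s, 1);             % [2;0;0;1;2]: a +1 marker at every run start (zero-length runs stack up)
V = cumsum(a); V = V(1:end-1);    % [2;2;2;3]: the index of the current run for every output position
% values(V) with values = {'a','b',1,2} then gives {'b','b','b',1}, matching the example output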

The solution presented here basically does the run-length decoding in two steps:
Replicate all values up to the maximum run length.
Use bsxfun's masking capability to select from each column only as many entries as the corresponding run length.
The rest of the code inside the function takes care of the input and output sizes to satisfy the requirements set in the question.
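As a small illustration of the two steps (my own example, not part of the original answer):
values     = [10 20];                              % made-up values
runLengths = [ 2  3];
maxlen     = max(runLengths);                      % 3
all_values = repmat(values, maxlen, 1);            % [10 20; 10 20; 10 20]
mask       = bsxfun(@le, (1:maxlen)', runLengths); % [1 1; 1 1; 0 1]: column j keeps runLengths(j) entries
all_values(mask).'                                 % 10 10 20 20 20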
The function code listed next would be a "cleaned-up" version of one of my previous answers to a similar problem. Here's the code -
function V = replicate_bsxfunmask(runLengths, values)
if nargin==1 %// Handle one-input case
values = 1:numel(runLengths);
end
%// Do size checking to make sure that both values and runlengths are row vectors.
if size(values,1) > 1
values = values.'; %//'
end
if size(runLengths,1) > 1
yes_transpose_output = false;
runLengths = runLengths.'; %//'
else
yes_transpose_output = true;
end
maxlen = max(runLengths);
all_values = repmat(values,maxlen,1);
%// OR all_values = values(ones(1,maxlen),:);
V = all_values(bsxfun(@le,(1:maxlen)',runLengths)); %//'
%// Bring the shape of V back to the shape of runlengths
if yes_transpose_output
V = V.'; %//'
end
return;
The code listed next is a hybrid (cumsum + replicate_bsxfunmask) and is suitable when you have a good number of outliers or really huge outliers. To keep it simple, for now this works on numeric arrays only. Here's the implementation:
function out = replicate_bsxfunmask_v2(runLengths, values)
if nargin==1 %// Handle one-input case
values = 1:numel(runLengths);
end
if size(values,1) > 1
values = values.'; %//'
end
if size(runLengths,1) > 1
yes_transpose_output = true;
runLengths = runLengths.'; %//'
else
yes_transpose_output = false;
end
%// Regularize inputs
values = values(runLengths>0);
runLengths = runLengths(runLengths>0);
%// Main portion of code
thresh = 200; %// runlengths threshold that are to be processed with cumsum
crunLengths = cumsum(runLengths); %%// cumsums of runlengths
mask = runLengths >= thresh; %// mask of runlengths above threshold
starts = [1 crunLengths(1:end-1)+1]; %// starts of each group of runlengths
mask_ind = find(mask); %// indices of mask
post_mark = starts(mask);
negt_mark = crunLengths(mask)+1;
if ~isempty(negt_mark) && negt_mark(end) > crunLengths(end)
negt_mark(end) = [];
end
%// Create array & set starts markers for starts of runlengths above thresh
marked_out = zeros(1,crunLengths(end));
marked_out(post_mark) = mask_ind;
marked_out(negt_mark) = marked_out(negt_mark) -1*mask_ind(1:numel(negt_mark));
%// Setup output array with the cumsumed version of marked array
out = cumsum(marked_out);
%// Mask for final output to decide between large and small runlengths
thresh_mask = out~=0;
%// Fill output array with cumsum and then rep-bsxfun based approaches
out(thresh_mask) = values(out(thresh_mask));
values = values(~mask);
runLengths = runLengths(~mask);
maxlen = max(runLengths);
all_values = repmat(values,maxlen,1);
out(~thresh_mask) = all_values(bsxfun(@le,(1:maxlen)',runLengths)); %//'
if yes_transpose_output
out = out.'; %//'
end
return;
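A quick consistency check against the plain replicate_bsxfunmask, using made-up run lengths with one huge outlier (my own sketch, not from the original answer):
rl = [randi(20,1,50), 5000, randi(20,1,50)];  % mostly short runs plus one very long run
isequal(replicate_bsxfunmask_v2(rl, 1:numel(rl)), replicate_bsxfunmask(rl, 1:numel(rl)))  % should be true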

Related

get pairs / triple / quadruple... of elements from vector by function

I have a vector with a couple of elements and I want to write a function that returns all combinations of x items from this vector.
The following code produces the right output for the cases x=2, x=3 and x=4.
However, I cannot implement a solution for every possible x following this idea.
values = {'A','B','C','D','E'};
n = length(values);
data2 = {}; % case x=2
for i = 1:n
for j = i+1:n
data2{end+1} = {values{i}, values{j}};
fprintf('%s %s\n',values{i}, values{j})
end
end
data3 = {}; % case x=3
for i = 1:n
for j = i+1:n
for k = j+1:n
data3{end+1} = {values{i}, values{j}, values{k}};
fprintf('%s %s %s\n',values{i}, values{j}, values{k})
end
end
end
data4 = {}; % case x=4
for i = 1:n
for j = i+1:n
for k = j+1:n
for l = k+1:n
data4{end+1} = {values{i}, values{j}, values{k}, values{l}};
fprintf('%s %s %s %s\n',values{i}, values{j}, values{k}, values{l})
end
end
end
end
What would a function look like that could return my data variable for any x?
data = getCombinations(values, x) %values is vector with elements, x is integer value
EDIT
The following code comes pretty close:
data = perms(values)
data = data(:,1:x)
data = unique(data,'rows')
but it still produces output like A,B and B,A
EDIT2
This sort of fixed it, but it is not very nice to look at, and it only works for numbers, not for text entries in cells:
data = perms(values)
data = data(:,1:x)
data = sort(data,2)
data = unique(data,'rows')
EDIT3
This did it but it is not very nice to look at... Maybe there is a better solution?
function [data] = getCombinations(values,x)
i = 1:length(values);
d = perms(i);
d = d(:,1:x);
d = sort(d,2);
d = unique(d,'rows');
data = values(d);
end
If you don't want repetitions (and your example suggests you don't) then try nchoosek as nchoosek(1:n, x) to give indices:
values = {'A','B','C','D','E'};
n = length(values);
x = 3;
C = nchoosek(1:n, x);
data = values(C)
In the above, each row is a unique combination of 3 of the 5 elements of values.
Alternatively pass in the values directly:
data = nchoosek(values, x);
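A minimal usage sketch of the index-based variant (the 10-by-3 shapes follow from nchoosek(5,3) = 10):
values = {'A','B','C','D','E'};
C = nchoosek(1:numel(values), 3);   % 10-by-3 matrix of index combinations; the first row is [1 2 3]
data = values(C);                   % 10-by-3 cell array; data(1,:) is {'A','B','C'}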

Image Processing: Algorithm taking too long in MATLAB

I am working in MATLAB to process two 512x512 images, the domain image and the range image. What I am trying to accomplish is the following:
Divide both domain and range images into 8x8 pixel blocks
For each 8x8 block in the domain image, apply a linear transformation to it and compare each of the 4096 transformed blocks with each of the 4096 range blocks.
Compute the error in each case between the transformed block and the range image block and find the minimum error.
Finally, for each 8x8 range block, I'll have the id of the 8x8 domain block for which the error was minimum (the error between the range block and the transformed domain block).
To achieve this, I have written the following code:
RangeImagecolor = imread('input.png'); %input is 512x512
DomainImagecolor = imread('input.png'); %Range and Domain images are identical
RangeImagetemp = rgb2gray(RangeImagecolor);
DomainImagetemp = rgb2gray(DomainImagecolor);
RangeImage = im2double(RangeImagetemp);
DomainImage = im2double(DomainImagetemp);
%For the (k,l)th 8x8 range image block
for k = 1:64
for l = 1:64
minerror = 9999;
min_i = 0;
min_j = 0;
for i = 1:64
for j = 1:64
%here I compute for the (i,j)th domain block, the transformed domain block stored in D_trans
error = 0;
D_trans = zeros(8,8);
R = zeros(8,8); %Contains the pixel values of the (k,l)th range block
for m = 1:8
for n = 1:8
R(m,n) = RangeImage(8*k-8+m,8*l-8+n);
%ApplyTransformation can depend on (k,l) so I can't compute the transformation outside the k,l loop.
[m_dash,n_dash] = ApplyTransformation(8*i-8+m,8*j-8+n);
D_trans(m,n) = DomainImage(m_dash,n_dash);
error = error + (R(m,n)-D_trans(m,n))^2;
end
end
if(error < minerror)
minerror = error;
min_i = i;
min_j = j;
end
end
end
end
end
As an example ApplyTransformation, one can use the identity transformation:
function [x_dash,y_dash] = Iden(x,y)
x_dash = x;
y_dash = y;
end
Now the problem I am facing is the high computation time. The order of computation in the above code is 64^5, which is of the order of 10^9. This computation should take at worst minutes or an hour, yet it takes about 40 minutes to compute just 50 iterations. I don't know why the code is running so slowly.
Thanks for reading my question.
You can use im2col* to convert the image to column format so that each block forms a column of a 64-by-4096 matrix. Then apply the transformation to each column and use bsxfun to vectorize the computation of the error.
DomainImage=rand(512);
RangeImage=rand(512);
DomainImage_col = im2col(DomainImage,[8 8],'distinct');
R = im2col(RangeImage,[8 8],'distinct');
[x y]=ndgrid(1:8);
function [x_dash, y_dash] = ApplyTransformation(x,y)
x_dash = x;
y_dash = y;
end
[x_dash, y_dash] = ApplyTransformation(x,y);
idx = sub2ind([8 8],x_dash, y_dash);
D_trans = DomainImage_col(idx,:); %transformation is reduced to matrix indexing
Error = 0;
for mn = 1:64
Error = Error + bsxfun(@minus,R(mn,:).',D_trans(mn,:)).^2;
end
[minerror ,min_ij]= min(Error,[],2); % for each range block, linear index of the domain block with minimum error
[min_i min_j]=ind2sub([64 64],min_ij); % convert linear index to subscript
Explanation:
Our goal is to reduce the number of loops as much as possible. For that we should avoid per-element indexing and instead use vectorization, and nested loops should be collapsed into a single loop where possible. As a first step we can write a more streamlined loop version:
min_ij = zeros(4096,1);
for kl = 1:4096 %%% => 1:size(R,2)
minerror = 9999;
min_ij(kl) = 0;
for ij = 1:4096 %%% => 1:size(D_trans,2)
Error = 0;
for mn = 1:64
Error = Error + (R(mn,kl) - D_trans(mn,ij)).^2;
end
if(Error < minerror)
minerror = Error;
min_ij(kl) = ij;
end
end
end
We can rearrange the loops, making the innermost loop the outer one, and separate the computation of the minimum from the computation of the error.
% Computation of the error
Error = zeros(4096,4096);
for mn = 1:64
for kl = 1:4096
for ij = 1:4096
Error(kl,ij) = Error(kl,ij) + (R(mn,kl) - D_trans(mn,ij)).^2;
end
end
end
% Computation of the min
min_ij = zeros(4096,1);
for kl = 1:4096
minerror = 9999;
min_ij(kl) = 0;
for ij = 1:4096
if(Error(kl,ij) < minerror)
minerror = Error(kl,ij);
min_ij(kl) = ij;
end
end
end
Now the code is arranged in a way that can best be vectorized:
Error = 0;
for mn = 1:64
Error = Error + bsxfun(@minus,R(mn,:).',D_trans(mn,:)).^2;
end
[minerror ,min_ij] = min(Error, [], 2);
[min_i ,min_j] = ind2sub([64 64], min_ij);
*If you don't have the Image Processing Toolbox a more efficient implementation of im2col can be found here.
*The whole computation takes less than a minute.
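For reference, here is a minimal sketch of such a 'distinct'-block rearrangement (my own version, not the linked implementation; it assumes the image size is an exact multiple of the block size, as is the case for 512x512 images and 8x8 blocks):
function cols = im2col_distinct_sketch(A, blk)
%IM2COL_DISTINCT_SKETCH Rearrange distinct blk(1)-by-blk(2) blocks of A into columns.
[r, c] = size(A);
A = reshape(A, blk(1), r/blk(1), blk(2), c/blk(2)); % split each dimension into (within-block, block) parts
A = permute(A, [1 3 2 4]);                          % within-block dimensions first, block dimensions last
cols = reshape(A, blk(1)*blk(2), []);               % one column per block, blocks taken in column-major order
end
% e.g. DomainImage_col = im2col_distinct_sketch(DomainImage, [8 8]); equivalent to im2col(...,'distinct') here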
First things first: as posted, your code doesn't actually do anything with the result. You likely do something with this minimum-error information and only forgot to paste it here, or still need to code that bit. Never mind for now.
One big issue with your code is that you recompute the transformed domain blocks inside the loop over the 64x64 range blocks, i.e. once per combination of range and domain block. 64^5 iterations of a complex operation are bound to be slow. Instead, you should calculate all transformations at once and save them:
allTransMats = cell(64);
for i = 1 : 64
for j = 1 : 64
allTransMats{i,j} = getTransformation(DomainImage, i, j)
end
end
function D_trans = getTransformation(DomainImage, i,j)
D_trans = zeros(8);
for m = 1 : 8
for n = 1 : 8
[m_dash,n_dash] = ApplyTransformation(8*i-8+m,8*j-8+n);
D_trans(m,n) = DomainImage(m_dash,n_dash);
end
end
end
This computes allTransMats and sits OUTSIDE the k, l loop, preferably as a separate function.
Now you run your big k, l, i, j loop, where you compare all the blocks as needed. The comparison can also be done block-wise instead of filling a small 8x8 matrix element by element, as the original code does:
m = 1 : 8;
n = m;
for ...
R = RangeImage(...); % This will give 8x8 output as n and m are vectors.
D = allTransMats{i,j};
difference = sum(sum((R-D).^2));
if (difference < minDifference) ...
end
Even in this simple no-transformation case, this speeds up the code a lot.
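Concretely, with the same index arithmetic as in the question, the (k,l)-th range block can be pulled out in one indexing operation instead of two inner loops:
m = 1:8; n = 1:8;
R = RangeImage(8*k-8+m, 8*l-8+n);   % whole 8x8 block at once, since m and n are vectors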
Finally, are you sure you need to compare each block of the transformed output with each block in the source? Typically you compare block1(a,b) with block2(a,b), i.e. blocks (or pixels) at the same position.
EDIT: allTransMats requires k and l too. Ouch. There is NO WAY to make this fast for a single iteration, as you require 64^5 calls to ApplyTransformation (or a vectorization of that function, but even then it might not be fast - we would have to see the function to help here).
Therefore, I will reiterate my advice to generate all transformations up front and then perform lookups: the allTransMats generation in the upper part of this answer should be changed to use all four loops and generate allTransMats{i,j,k,l}. It WILL be slow; there is no way around that, as I mentioned above. But it is a cost you pay once: after saving allTransMats, all further image analyses can simply load it instead of generating it again.
But ... what do you even do? A transformation that depends on source and destination block indices plus pixel indices (six values in total) sounds like a mistake somewhere, or a prime candidate to optimize instead of all the rest.

K-means for image compression only gives black-and-white result

I'm doing this exercise by Andrew Ng about using k-means to reduce the number of colors in an image. But the problem is that my code only gives a black-and-white image :(. I have checked every step in the algorithm, but it still won't give the correct result. Please help me, thank you very much!
Here is the link to the exercise, and here is the dataset.
The correct result is given at the exercise link; my code instead produces a black-and-white image.
Here is my code:
function [] = KMeans()
Image = double(imread('bird_small.tiff'));
[rows,cols, RGB] = size(Image);
Points = reshape(Image,rows * cols, RGB);
K = 16;
Centroids = zeros(K,RGB);
s = RandStream('mt19937ar','Seed',0);
% Initialization :
% Pick out K random colours and make sure they are all different
% from each other! This prevents the situation where two of the means
% are assigned to the exact same colour, therefore we don't have to
% worry about division by zero in the E-step
% However, if K = 16 for example, and there are only 15 colours in the
% image, then this while loop will never exit!!! This needs to be
% addressed in the future :(
% TODO : Vectorize this part!
done = false;
while done == false
RowIndex = randperm(s,rows);
ColIndex = randperm(s,cols);
RowIndex = RowIndex(1:K);
ColIndex = ColIndex(1:K);
for i = 1 : K
for j = 1 : RGB
Centroids(i,j) = Image(RowIndex(i),ColIndex(i),j);
end
end
Centroids = sort(Centroids,2);
Centroids = unique(Centroids,'rows');
if size(Centroids,1) == K
done = true;
end
end;
% imshow(imread('bird_small.tiff'))
%
% for i = 1 : K
% hold on;
% plot(RowIndex(i),ColIndex(i),'r+','MarkerSize',50)
% end
eps = 0.01; % Epsilon
IterNum = 0;
while 1
% E-step: Estimate membership given parameters
% Membership: The centroid that each colour is assigned to
% Parameters: Location of centroids
Dist = pdist2(Points,Centroids,'euclidean');
[~, WhichCentroid] = min(Dist,[],2);
% M-step: Estimate parameters given membership
% Membership: The centroid that each colour is assigned to
% Parameters: Location of centroids
% TODO: Vectorize this part!
OldCentroids = Centroids;
for i = 1 : K
PointsInCentroid = Points((find(WhichCentroid == i))',:);
NumOfPoints = size(PointsInCentroid,1);
% Note that NumOfPoints is never equal to 0, as a result of
% the initialization. Or .... ???????
if NumOfPoints ~= 0
Centroids(i,:) = sum(PointsInCentroid , 1) / NumOfPoints ;
end
end
% Check for convergence: Here we use the L2 distance
IterNum = IterNum + 1;
Margins = sqrt(sum((Centroids - OldCentroids).^2, 2));
if sum(Margins > eps) == 0
break;
end
end
IterNum;
Centroids ;
% Load the larger image
[LargerImage,ColorMap] = imread('bird_large.tiff');
LargerImage = double(LargerImage);
[largeRows,largeCols,~] = size(LargerImage); % RGB is always 3
% Dist = zeros(size(Centroids,1),RGB);
% TODO: Vectorize this part!
% Replace each of the pixel with the nearest centroid
for i = 1 : largeRows
for j = 1 : largeCols
Dist = pdist2(Centroids,reshape(LargerImage(i,j,:),1,RGB),'euclidean');
[~,WhichCentroid] = min(Dist);
LargerImage(i,j,:) = Centroids(WhichCentroid);
end
end
% Display new image
imshow(uint8(round(LargerImage)),ColorMap)
imwrite(uint8(round(LargerImage)), 'D:\Hoctap\bird_kmeans.tiff');
You're indexing into Centroids with a single linear index.
Centroids(WhichCentroid)
This is going to return a single value (specifically the red value for that centroid). When you assign this to LargerImage(i,j,:), it will assign all RGB channels the same value, resulting in a grayscale image.
You likely want to grab all columns of the selected centroid to provide the red, green, and blue values that you want to assign to LargerImage(i,j,:). You can do this by using a colon : to select all columns of Centroids in the row indicated by WhichCentroid.
LargerImage(i,j,:) = Centroids(WhichCentroid,:);
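For illustration, with a made-up three-colour palette:
Centroids = [255 0 0; 0 255 0; 0 0 255];
Centroids(2)     % returns 0: a single linear index picks only Centroids(2,1), the red channel
Centroids(2,:)   % returns 0 255 0: the full RGB row of the second centroid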

K-means for color quantization - Code not vectorized

I'm doing this exercise by Andrew Ng about using k-means to reduce the number of colors in an image. It worked correctly, but I'm afraid it's a little slow because of all the for loops in the code, so I'd like to vectorize them. But there are some loops that I just can't seem to vectorize effectively. Please help me, thank you very much!
Also if possible please give some feedback on my coding style :)
Here is the link to the exercise, and here is the dataset.
The correct result is given at the exercise link.
And here is my code:
function [] = KMeans()
Image = double(imread('bird_small.tiff'));
[rows,cols, RGB] = size(Image);
Points = reshape(Image,rows * cols, RGB);
K = 16;
Centroids = zeros(K,RGB);
s = RandStream('mt19937ar','Seed',0);
% Initialization :
% Pick out K random colours and make sure they are all different
% from each other! This prevents the situation where two of the means
% are assigned to the exact same colour, therefore we don't have to
% worry about division by zero in the E-step
% However, if K = 16 for example, and there are only 15 colours in the
% image, then this while loop will never exit!!! This needs to be
% addressed in the future :(
% TODO : Vectorize this part!
done = false;
while done == false
RowIndex = randperm(s,rows);
ColIndex = randperm(s,cols);
RowIndex = RowIndex(1:K);
ColIndex = ColIndex(1:K);
for i = 1 : K
for j = 1 : RGB
Centroids(i,j) = Image(RowIndex(i),ColIndex(i),j);
end
end
Centroids = sort(Centroids,2);
Centroids = unique(Centroids,'rows');
if size(Centroids,1) == K
done = true;
end
end;
% imshow(imread('bird_small.tiff'))
%
% for i = 1 : K
% hold on;
% plot(RowIndex(i),ColIndex(i),'r+','MarkerSize',50)
% end
eps = 0.01; % Epsilon
IterNum = 0;
while 1
% E-step: Estimate membership given parameters
% Membership: The centroid that each colour is assigned to
% Parameters: Location of centroids
Dist = pdist2(Points,Centroids,'euclidean');
[~, WhichCentroid] = min(Dist,[],2);
% M-step: Estimate parameters given membership
% Membership: The centroid that each colour is assigned to
% Parameters: Location of centroids
% TODO: Vectorize this part!
OldCentroids = Centroids;
for i = 1 : K
PointsInCentroid = Points((find(WhichCentroid == i))',:);
NumOfPoints = size(PointsInCentroid,1);
% Note that NumOfPoints is never equal to 0, as a result of
% the initialization. Or .... ???????
if NumOfPoints ~= 0
Centroids(i,:) = sum(PointsInCentroid , 1) / NumOfPoints ;
end
end
% Check for convergence: Here we use the L2 distance
IterNum = IterNum + 1;
Margins = sqrt(sum((Centroids - OldCentroids).^2, 2));
if sum(Margins > eps) == 0
break;
end
end
IterNum;
Centroids ;
% Load the larger image
[LargerImage,ColorMap] = imread('bird_large.tiff');
LargerImage = double(LargerImage);
[largeRows,largeCols,NewRGB] = size(LargerImage); % RGB is always 3
% TODO: Vectorize this part!
largeRows
largeCols
NewRGB
% Replace each of the pixel with the nearest centroid
NewPoints = reshape(LargerImage,largeRows * largeCols, NewRGB);
Dist = pdist2(NewPoints,Centroids,'euclidean');
[~,WhichCentroid] = min(Dist,[],2);
NewPoints = Centroids(WhichCentroid,:);
LargerImage = reshape(NewPoints,largeRows,largeCols,NewRGB);
% for i = 1 : largeRows
% for j = 1 : largeCols
% Dist = pdist2(Centroids,reshape(LargerImage(i,j,:),1,RGB),'euclidean');
% [~,WhichCentroid] = min(Dist);
% LargerImage(i,j,:) = Centroids(WhichCentroid,:);
% end
% end
% Display new image
imshow(uint8(round(LargerImage)),ColorMap)
UPDATE: Replaced
for i = 1 : K
for j = 1 : RGB
Centroids(i,j) = Image(RowIndex(i),ColIndex(i),j);
end
end
with
for i = 1 : K
Centroids(i,:) = Image(RowIndex(i),ColIndex(i),:);
end
I think this may be vectorized further by using linear indexing, but for now I should just focus on the while loop since it takes most of the time.
Also when I tried @Dev-iL's suggestion and replaced
for i = 1 : K
PointsInCentroid = Points((find(WhichCentroid == i))',:);
NumOfPoints = size(PointsInCentroid,1);
% Note that NumOfPoints is never equal to 0, as a result of
% the initialization. Or .... ???????
if NumOfPoints ~= 0
Centroids(i,:) = sum(PointsInCentroid , 1) / NumOfPoints ;
end
end
with
E = sparse(1:numel(WhichCentroid), WhichCentroid' , 1, Num, K, Num); % Num = size(Points,1), the number of pixels
Centroids = (E * spdiags(1./sum(E,1)',0,K,K))' * Points ;
the results were always worse: with K = 16, the first takes 2.414 s and the second 2.455 s; with K = 32, the first takes 4.529 s and the second 5.022 s. It seems vectorization does not help here, but maybe there's something wrong with my code :(.
However, when I replaced
Dist = pdist2(Points,Centroids,'euclidean');
[~, WhichCentroid] = min(Dist,[],2);
(in the while loop) with
Dist = bsxfun(@minus,dot(Centroids',Centroids',1)' / 2 , Centroids * Points' );
[~, WhichCentroid] = min(Dist,[],1);
WhichCentroid = WhichCentroid';
the code ran much faster, especially when K is large (K=32)
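This replacement works because, for a fixed point p, the index k minimizing ||p - c_k||^2 also minimizes ||c_k||^2/2 - c_k'*p; the ||p||^2 term is the same for every centroid and can be dropped. A small check of the equivalence (my own sketch; pdist2 needs the Statistics Toolbox):
P = rand(10,3); C = rand(4,3);                      % made-up points and centroids
[~, idx1] = min(pdist2(P, C, 'euclidean'), [], 2);  % original nearest-centroid assignment
D = bsxfun(@minus, dot(C',C',1)'/2, C*P');          % K-by-N surrogate distances, as in the replacement
[~, idx2] = min(D, [], 1);
isequal(idx1, idx2')                                % should be true (up to ties)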
Thank you everyone!

How to make a Gaussian filter in Matlab

I have tried to make a Gaussian filter in MATLAB without using imfilter() and fspecial().
I have tried this, but the result is not like the one I get with imfilter and fspecial.
Here is my code.
function Gaussian_filtered = Gauss(image_x, sigma)
% for single axis
% http://en.wikipedia.org/wiki/Gaussian_filter
Gaussian_filtered = exp(-image_x^2/(2*sigma^2)) / (sigma*sqrt(2*pi));
end
For the 2D Gaussian:
function h = Gaussian2D(hsize, sigma)
n1 = hsize;
n2 = hsize;
for i = 1 : n2
for j = 1 : n1
% size is 10;
% -5<center<5 area is covered.
c = [j-(n1+1)/2 i-(n2+1)/2]';
% A product of both axes is 2D Gaussian filtering
h(i,j) = Gauss(c(1), sigma)*Gauss(c(2), sigma);
end
end
end
and the final one is
function Filtered = GaussianFilter(ImageData, hsize, sigma)
%Get the result of Gaussian
filter_ = Gaussian2D(hsize, sigma);
%check image
[r, c] = size(ImageData);
Filtered = zeros(r, c);
for i=1:r
for j=1:c
for k=1:hsize
for m=1:hsize
Filtered = Filtered + ImageData(i,j).*filter_(k,m);
end
end
end
end
end
But the processed image is almost the same as the input image. I suspect the last function, GaussianFilter(), is the problem...
Thanks.
Here's an alternative:
Create the 2D-Gaussian:
function f=gaussian2d(N,sigma)
% N is grid size, sigma speaks for itself
[x y]=meshgrid(round(-N/2):round(N/2), round(-N/2):round(N/2));
f=exp(-x.^2/(2*sigma^2)-y.^2/(2*sigma^2));
f=f./sum(f(:));
Filtered image, given your image is called Im:
filtered_signal=conv2(Im,gaussian2d(N,sig),'same');
Here's some plots:
imagesc(gaussian2d(7,2.5))
Im=rand(100);subplot(1,2,1);imagesc(Im)
subplot(1,2,2);imagesc(conv2(Im,gaussian2d(7,2.5),'same'));
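If the Image Processing Toolbox is available, this can be sanity-checked against fspecial (my own check; note that gaussian2d(7,sigma) spans round(-7/2):round(7/2) = -4:4, i.e. a 9x9 kernel):
h1 = gaussian2d(7, 2.5);              % 9x9 kernel from the function above
h2 = fspecial('gaussian', 9, 2.5);    % toolbox kernel with the same support and sigma
max(abs(h1(:) - h2(:)))               % expected to be ~0, up to floating-point noise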
The example code below is slow because of the for-loops. In MATLAB you are better off using conv2, as suggested by user bla, or simply filter2.
I = imread('peppers.png'); %load example data
I = I(:,:,1);
N=5; %must be odd
sigma=1;
figure(1);imagesc(I);colormap gray
x=1:N;
X=exp(-(x-((N+1)/2)).^2/(2*sigma^2));
h=X'*X;
h=h./sum(h(:));
%I=filter2(h,I); %this is faster
[is,js]=size(I);
Ib = NaN(is+N-1,js+N-1); %add borders
b=(N-1)/2 +1;
Ib(b:b+is-1,b:b+js-1)=I;
I=zeros(size(I));
for i = 1:is
for j = 1:js
I(i,j)=sum(sum(Ib(i:i+N-1,j:j+N-1).*h,'omitnan'));
end
end
figure(2);imagesc(I);colormap gray
