How do I find the exact element value in a loop? (MATLAB)

I am writing code to implement a basic optimization method that keeps the error between an observed and a simulated profile to a minimum, using a simple root mean square error. The whole code is given here for convenience.
%% Vegetation profile along a hillslope
% Parameters
Tv = 1; % values are {1,2,3,5,10}
%Kvg = 0.3; % according to the Tv values Kvg=1/Tv
Kvg = 1/Tv;
Kvd = 0.5; % values are {0.1,0.5,1,5,10}
Tcv = 10; % values are {20,50,80,130,180}
q = (1/3);
Nv = (1:1:10); %(Kvd*Tcv)/Kvg; range is given for the parameter
m = 2/3;
n = 2/3;
%% Horizontal coordinate and length scale
x = 0:1:100;
L = 100;
x_non = x/L;
Ne =10; %L/Tcv;
%% Vegetation profile on the slope
v = zeros(length(x_non),length(Nv));
for i = 1:length(x_non)
for j = 1:length(Nv)
v(i,j) = (1/(1+Nv(j)*(Ne^q)*(x_non(i)^q)));
end
end
%% import the Excel file containing the real dataset
obs_data = xlsread('model_data.xlsx'); % 1-distance, 2-elevation, 3- vegetation, 4-slope
%% root mean square error between real and synthetic dataset of vegetation
rmse_v = zeros(length(x_non),length(Nv));
err_v = zeros(1,length(Nv));
for j=1:length(Nv)
rmse_v(:,j)=(obs_data(:,3)-v(:,j)).^2;
err_v(1,j)= sqrt(mean(rmse_v(:,j)));
end
Now I have to extract the value of Nv for which the RMSE (err_v) is minimum. That Nv value then has to be used in the next section.
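A minimal sketch of one way to do this (the names min_err, j_best and Nv_best are only illustrative):
[min_err, j_best] = min(err_v); % index of the smallest RMSE over all Nv
Nv_best = Nv(j_best); % Nv value giving the minimum error
The scalar Nv_best could then be used in place of the vector Nv in the slope-profile expression below.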
Nt = (1:1:100);
% Slope profile on the x direction
s = zeros(length(x_non),length(Nt));
for i = 1:length(x_non)
for j = 1:length(Nt)
s(i,j)=((Ne^q)/Nt(j))*(x_non(i)^(q-m))+(1/(Nt(j)*(x_non(i)^m)*(1+Nv*(Ne^q)...
*(x_non(i)^q))))^(1/n);
end
end
I would be grateful for any guidance on how to write this.

Related

How do I find local threshold for coefficients in image compression using DWT in MATLAB

I'm trying to write an image compression script in MATLAB using a multilayer 3D DWT (color image). Along the way, I want to apply thresholding to the coefficient matrices, using both global and local thresholds.
I'd like to use the formula below to calculate my local threshold:
T = sigma * sqrt(2 * log2(N))
where sigma is the variance and N is the number of elements.
Global thresholding works fine; but my problem is that the calculated local threshold is (most often!) greater than the maximum band coefficient, therefore no thresholding is applied.
Everything else works fine and I get a result too, but I suspect the local threshold is miscalculated. Also, the resulting image is larger than the original!
I'd appreciate any help on the correct way to calculate the local threshold, or if there's a pre-set MATLAB function.
Here's an example output:
Here's my code:
clear;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% COMPRESSION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% read base image
% dwt 3/5-L on base images
% quantize coeffs (local/global)
% count zero value-ed coeffs
% calculate mse/psnr
% save and show result
% read images
base = imread('circ.jpg');
fam = 'haar'; % wavelet family
lvl = 3; % wavelet depth
% set to 1 to apply global thr
thr_type = 0;
% global threshold value
gthr = 180;
% convert base to grayscale
%base = rgb2gray(base);
% apply dwt on base image
dc = wavedec3(base, lvl, fam);
% extract coeffs
ll_base = dc.dec{1};
lh_base = dc.dec{2};
hl_base = dc.dec{3};
hh_base = dc.dec{4};
ll_var = var(ll_base, 0);
lh_var = var(lh_base, 0);
hl_var = var(hl_base, 0);
hh_var = var(hh_base, 0);
% count number of elements
ll_n = numel(ll_base);
lh_n = numel(lh_base);
hl_n = numel(hl_base);
hh_n = numel(hh_base);
% find local threshold
ll_t = ll_var * (sqrt(2 * log2(ll_n)));
lh_t = lh_var * (sqrt(2 * log2(lh_n)));
hl_t = hl_var * (sqrt(2 * log2(hl_n)));
hh_t = hh_var * (sqrt(2 * log2(hh_n)));
% global
if thr_type == 1
ll_t = gthr; lh_t = gthr; hl_t = gthr; hh_t = gthr;
end
% count zero values in bands
ll_size = size(ll_base);
lh_size = size(lh_base);
hl_size = size(hl_base);
hh_size = size(hh_base);
% count zero values in new band matrices
ll_zeros = sum(ll_base==0,'all');
lh_zeros = sum(lh_base==0,'all');
hl_zeros = sum(hl_base==0,'all');
hh_zeros = sum(hh_base==0,'all');
% initiate new matrices
ll_new = zeros(ll_size);
lh_new = zeros(lh_size);
hl_new = zeros(hl_size);
hh_new = zeros(hh_size);
% apply thresholding on bands
% if new value < thr => 0
% otherwise, keep the previous value
for id=1:ll_size(1)
for idx=1:ll_size(2)
if ll_base(id,idx) < ll_t
ll_new(id,idx) = 0;
else
ll_new(id,idx) = ll_base(id,idx);
end
end
end
for id=1:lh_size(1)
for idx=1:lh_size(2)
if lh_base(id,idx) < lh_t
lh_new(id,idx) = 0;
else
lh_new(id,idx) = lh_base(id,idx);
end
end
end
for id=1:hl_size(1)
for idx=1:hl_size(2)
if hl_base(id,idx) < hl_t
hl_new(id,idx) = 0;
else
hl_new(id,idx) = hl_base(id,idx);
end
end
end
for id=1:hh_size(1)
for idx=1:hh_size(2)
if hh_base(id,idx) < hh_t
hh_new(id,idx) = 0;
else
hh_new(id,idx) = hh_base(id,idx);
end
end
end
% count zeros of the new matrices
ll_new_size = size(ll_new);
lh_new_size = size(lh_new);
hl_new_size = size(hl_new);
hh_new_size = size(hh_new);
% count number of zeros among new values
ll_new_zeros = sum(ll_new==0,'all');
lh_new_zeros = sum(lh_new==0,'all');
hl_new_zeros = sum(hl_new==0,'all');
hh_new_zeros = sum(hh_new==0,'all');
% set new band matrices
dc.dec{1} = ll_new;
dc.dec{2} = lh_new;
dc.dec{3} = hl_new;
dc.dec{4} = hh_new;
% count how many coeff. were thresholded
ll_zeros_diff = ll_new_zeros - ll_zeros;
lh_zeros_diff = lh_zeros - lh_new_zeros;
hl_zeros_diff = hl_zeros - hl_new_zeros;
hh_zeros_diff = hh_zeros - hh_new_zeros;
% show coeff. matrices vs. thresholded version
figure
colormap(gray);
subplot(2,4,1); imagesc(ll_base); title('LL');
subplot(2,4,2); imagesc(lh_base); title('LH');
subplot(2,4,3); imagesc(hl_base); title('HL');
subplot(2,4,4); imagesc(hh_base); title('HH');
subplot(2,4,5); imagesc(ll_new); title({'LL thr';ll_zeros_diff});
subplot(2,4,6); imagesc(lh_new); title({'LH thr';lh_zeros_diff});
subplot(2,4,7); imagesc(hl_new); title({'HL thr';hl_zeros_diff});
subplot(2,4,8); imagesc(hh_new); title({'HH thr';hh_zeros_diff});
% idwt to reconstruct compressed image
cmp = waverec3(dc);
cmp = uint8(cmp);
% calculate mse/psnr
D = (double(cmp) - double(base)).^2; % cast to double so negative differences are not clipped by uint8
mse = sum(D(:))/numel(base);
psnr = 10*log10(255*255/mse);
% show images and mse/psnr
figure
subplot(1,2,1);
imshow(base); title("Original"); axis square;
subplot(1,2,2);
imshow(cmp); colormap(gray); axis square;
msg = strcat("MSE: ", num2str(mse), " | PSNR: ", num2str(psnr));
title({"Compressed";msg});
% save image locally
imwrite(cmp, 'compressed.png');
I solved it myself.
The sigma in the local threshold formula is not the variance, it's the standard deviation. I applied these steps:
used std2() (rather than stdfilt()) to find the standard deviation of my coefficient matrices (thanks to @Rotem for pointing this out)
used numel() to count the number of elements in the coefficient matrices
This is a summary of the process; it's the same for the other bands (LH, HL, HH):
[c, s] = wavedec2(image, level, wname); % apply DWT (note: the level comes before the wavelet name)
ll = appcoef2(c, s, wname, level); % extract LL
ll_std = std2(ll); % find standard deviation
ll_n = numel(ll); % find number of coeffs in LL
ll_t = ll_std * (sqrt(2 * log2(ll_n))); % the local threshold formula
ll_new = ll .* double(ll > ll_t); % thresholding
replace the LL values in c (see the sketch below)
reconstruct by applying the IDWT using waverec2
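A minimal sketch of those last two steps, assuming a grayscale input so that the coarsest-level approximation (LL) coefficients occupy the first numel(ll_new) entries of c (no explicit for loop is needed):
c(1:numel(ll_new)) = ll_new(:)'; % overwrite the LL coefficients stored at the start of c
rec = waverec2(c, s, wname); % reconstruct the compressed image (IDWT)
rec = uint8(rec); % back to an 8-bit image for display/saving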
This is a sample output:

MATLAB to Scilab conversion: mfile2sci error "File contains no instruction"

I am very new to Scilab, but so far have not been able to find an answer (either here or via Google) to my question. I'm sure it's a simple solution, but I'm at a loss. I have a lot of MATLAB scripts I wrote in grad school, but now that I'm out of school, I no longer have access to MATLAB (and can't justify the cost). Scilab looked like the best open alternative. I'm trying to convert my .m files to Scilab-compatible versions using mfile2sci, but when running the mfile2sci GUI, I get the error/message shown below. Attached is the original code from the M-file, in case it's relevant.
I searched Stack Overflow and companion sites, Google, and the Scilab documentation.
The M-file code follows (it's a super basic MATLAB script as part of an old homework question -- I chose it as it's the shortest, most straightforward M-file I had):
Mmax = 15;
N = 20;
T = 2000;
%define upper limit for sparsity of signal
smax = 15;
mNE = zeros(smax,Mmax);
mESR= zeros(smax,Mmax);
for M = 1:Mmax
aNormErr = zeros(smax,1);
aSz = zeros(smax,1);
ESR = zeros(smax,1);
for s=1:smax % for-loop to loop script smax times
normErr = zeros(1,T);
vESR = zeros(1,T);
sz = zeros(1,T);
for t=1:T %for-loop to carry out 2000 trials per s-value
esr = 0;
A = randn(M,N); % generate random MxN matrix
[M,N] = size(A);
An = zeros(M,N); % initialize normalized matrix
for h = 1:size(A,2) % normalize columns of matrix A
V = A(:,h)/norm(A(:,h));
An(:,h) = V;
end
A = An; % replace A with its column-normalized counterpart
c = randperm(N,s); % create random support vector with s entries
x = zeros(N,1); % initialize vector x
for i = 1:size(c,2)
val = (10-1)*rand + 1;% generate interval [1,10]
neg = mod(randi(10),2); % include [-10,-1]
if neg~=0
val = -1*val;
end
x(c(i)) = val; %replace c(i)th value of x with the nonzero value
end
y = A*x; % generate measurement vector (y)
R = y;
S = []; % initialize array to store selected columns of A
indx = []; % vector to store indices of selected columns
coeff = zeros(1,s); % vector to store coefficients of approx.
stop = 10; % init. stop condition
in = 0; % index variable
esr = 0;
xhat = zeros(N,1); % intialize estimated x signal
while (stop>0.5 && size(S,2)<smax)
%MAX = abs(A(:,1)'*R);
maxV = zeros(1,N);
for i = 1:size(A,2)
maxV(i) = abs(A(:,i)'*R);
end
in = find(maxV == max(maxV));
indx = [indx in];
S = [S A(:,in)];
coeff = [coeff R'*S(:,size(S,2))]; % update coefficient vector
for w=1:size(S,2)
r = y - ((R'*S(:,w))*S(:,w)); % update residuals
if norm(r)<norm(R)
index = w;
end
R = r;
stop = norm(R); % update stop condition
end
for j=1:size(S,2) % place coefficients into xhat at correct indices
xhat(indx(j))=coeff(j);
end
nE = norm(x-xhat)/norm(x); % calculate normalized error for this estimate
%esr = 0;
indx = sort(indx);
c = sort(c);
if isequal(indx,c)
esr = esr+1;
end
end
vESR(t) = esr;
sz(t) = size(S,2);
normErr(t) = nE;
end
%avsz = sum(sz)/T;
aSz(s) = sum(sz)/T;
%aESR = sum(vESR)/T;
ESR(s) = sum(vESR)/T;
%avnormErr = sum(normErr)/T; % produce average normalized error for these run
aNormErr(s) = sum(normErr)/T; % add new avnormErr to vector of all av norm errors
end
% just put this here to view the vector
mNE(:,M) = aNormErr;
mESR(:,M) = ESR;
end % close the for M = 1:Mmax loop
mNE %reshape(mNE,[],Mmax)
mESR %reshape(mESR,[],Mmax)
figure
dimx = [1 Mmax];
dimy = [1 smax];
imagesc(dimx,dimy,mESR)
colormap gray
strESR = sprintf('Average ESR, N=%d',N);
title(strESR);
xlabel('M');
ylabel('s');
strNE = sprintf('Average Normed Error, N=%d',N);
figure
imagesc(dimx,dimy,mNE)
colormap gray
title(strNE)
xlabel('M');
ylabel('s');
The command used (and results) follow:
--> mfile2sci
ans =
[]
****** Beginning of mfile2sci() session ******
File to convert: C:/Users/User/Downloads/WTF_new.m
Result file path: C:/Users/User/DOWNLO~1/
Recursive mode: OFF
Only double values used in M-file: NO
Verbose mode: 3
Generate formatted code: NO
M-file reading...
M-file reading: Done
Syntax modification...
Syntax modification: Done
File contains no instruction, no translation made...
****** End of mfile2sci() session ******
To convert the foo.m file one has to enter
mfile2sci <path>/foo.m
where <path> stands for the path of the directory where foo.m is. The result is written in <path>/foo.sci.
Remove the ```` at the beginning of each line and the conversion will proceed normally(?). However, don't expect to obtain a working .sci file, as the m2sci converter is (to me) still an experimental tool!
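For reference, a direct (non-GUI) call from the Scilab console might look like this, using the paths from the session above:
// convert WTF_new.m and write the .sci result into the given directory
mfile2sci('C:/Users/User/Downloads/WTF_new.m', 'C:/Users/User/Downloads/');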

parfor doesn't consider information about vectors which are used in it

This is part of my code in MATLAB. I tried to make it parallel, but there is an error:
The variable gax in a parfor cannot be classified.
I know why the error occurs: I should tell MATLAB that v is an increasing vector which doesn't contain repeated elements. Could anyone help me use this information to parallelize the code?
v=[1,3,6,8];
ggx=5.*ones(15,14);
gax=ones(15,14);
for m=v
if m > 1
parfor j=1:m-1
gax(j,m-1) = ggx(j,m-1);
end
end
if m<nn
parfor jo=m+1:15
gax(jo,m) = ggx(jo,m);
end
end
end
Optimizing code should be closely related to its purpose, especially when you use parfor. The code you wrote in the question can be written in a much more efficient way and definitely does not need to be parallelized.
However, I understand that you tried to simplify the problem just to get the idea of how to slice your variables, so here is a fixed version that can run with parfor. But this is surely not the way to write this code:
v = [1,3,6,8];
ggx = 5.*ones(15,14);
gax = ones(15,14);
nn = 5;
for m = v
if m > 1
temp_end = m-1;
temp = ggx(:,temp_end);
parfor ja = 1:temp_end
gax(ja,temp_end) = temp(ja);
end
end
if m < nn
temp = ggx(:,m);
parfor jo = m+1:15
gax(jo,m) = temp(jo);
end
end
end
A vectorized implementation will look like this:
v = [1,3,6,8];
ggx = 5.*ones(15,14);
gax = ones(15,14);
nn = 5;
m1 = v>1; % first condition with logical indexing
temp = v(m1)-1; % get the values from v
r = ones(1,sum(temp)); % generate a vector of indices
r(cumsum(temp)) = -temp+1; % place the resetting locations
r = cumsum(r); % calculate the indices
r(cumsum(temp)) = temp; % place the ending points
c = repelem(temp,temp); % create an indices vector for the columns
inds1 = sub2ind(size(gax),r,c); % convert the indices to linear
mnn = v<nn; % second condition with logical indexing
temp = v(mnn)+1; % get the values from v
r_max = size(gax,1); % get the height of gax
r_count = r_max-temp+1; % calculate no. of rows per value in v
r = ones(1,sum(r_count)); % generate a vector of indices
r([1 r_count(1:end-1)+1]) = temp; % set the starting indices
r(cumsum(r_count)+1) = -(r_count-temp)+1; % place the resetting locations
r = cumsum(r(1:end-1)); % calculate the indices
c = repelem(temp-1,r_count); % create an indices vector for the columns
inds2 = sub2ind(size(gax),r,c); % convert the indices to linear
gax([inds1 inds2]) = ggx([inds1 inds2]); % assign the relevant values
This is indeed quite complicated, and not always necessary. A good thing to remember, though, is that nested for loops are much slower than a single loop, so in some cases (depending on the size of the output) this may be the fastest solution:
for m = v
if m > 1
gax(1:m-1,m-1) = ggx(1:m-1,m-1);
end
if m<nn
gax(m+1:15,m) = ggx(m+1:15,m);
end
end

Solving PDE with Matlab

sol = pdepe(m,@ParticleDiffusionpde,@ParticleDiffusionic,@ParticleDiffusionbc,x,t);
% Extract the first solution component as u.
u = sol(:,:,1);
function [c,f,s] = ParticleDiffusionpde(x,t,u,DuDx)
global Ds
c = 1/Ds;
f = DuDx;
s = 0;
function u0 = ParticleDiffusionic(x)
global qo
u0 = qo;
function [pl,ql,pr,qr] = ParticleDiffusionbc(xl,ul,xr,ur,t,x)
global Ds K n
global Amo Gc kf rhop
global uavg
global dr R nr
sum = 0;
for i = 1:1:nr-1
r1 = (i-1)*dr; % radius at i
r2 = i * dr; % radius at i+1
r1 = double(r1); % convert to double precision
r2 = double(r2);
sum = sum + (dr / 2 * (r1*ul+ r2*ur));
end;
uavg = 3/R^3 * sum;
ql = 1;
pl = 0;
qr = 1;
pr = -((kf/(Ds.*rhop)).*(Amo - Gc.*uavg - ((double(ur/K)).^2).^(n/2) ));
dq(r,t)/dt = Ds( d2q(r,t)/dr2 + (2/r)*dq(r,t)/dr )
q(r, t=0) = 0
dq(r=0, t)/dr = 0
dq(r=dp/2, t)/dr = (kf/(Ds*rhop)) [C(t) - Cp(at r = dp/2)]
q = solid phase concentration of trace compound in a particle with radius dp/2
C = bulk liquid concentration of trace compound
Cp = trace compound concentration at particle surface
I want to solve the above PDE with the given initial and boundary conditions. I tried MATLAB's pdepe, but it does not work satisfactorily. Maybe the boundary conditions are creating the problem for me. I also used this isotherm equation for equilibrium: q = K*Cp^(1/n). This is a convection-diffusion equation, but I could not find any write-ups that address solving this type of equation properly.
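For reference, inverting the isotherm q = K*Cp^(1/n) gives Cp = (q/K)^n, which is the term ((ur/K)^2)^(n/2) that appears in the right boundary condition code above.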
There are two problems with the current implementation.
Incorrect Source Term
The PDE you are attempting to solve has the form
(1/Ds) * dq(r,t)/dt = d2q(r,t)/dr2 + (2/r)*dq(r,t)/dr
which has the equivalent form
(1/Ds) * dq(r,t)/dt = (1/r) * d/dr( r * dq(r,t)/dr ) + (1/r)*dq(r,t)/dr
where the last term arises due to the factor of 2 in the original PDE.
The last term needs to be incorporated into pdepe via a source term, which is why the working example below calls pdepe with m = 1 and s = DqDr./r.
Calculation of q average
The current implementation attempts to calculate the average value of q using the left and right values of q passed to the boundary condition function.
This is incorrect.
The average value of q needs to be calculated from a vector of up-to-date values of the quantity.
However, there is a complication: the only function that receives all mesh values is ParticleDiffusionpde, and the mesh values passed to that function are not guaranteed to be from the mesh we provided.
Solution: use events (as described in the pdepe documentation).
This is a hack since the event function is meant to detect zero-crossings, but it has the advantage that the function is given all values of q on the mesh we provide.
So, the working example below (you'll notice I set all of the parameters to 1 since I didn't know better) uses the events function to update a variable qStore that can be accessed by the boundary condition function (see here for an explanation), and the boundary condition function performs a vectorized trapezoidal integration for the average calculation.
Working Example
function [] = ParticleDiffusion()
% Parameters
Ds = 1;
q0 = 0;
K = 1;
n = 1;
Amo = 1;
Gc = 1;
kf = 1;
rhop = 1;
% Space
rMesh = linspace(0,1,10);
rMesh = rMesh(:) ;
dr = rMesh(2) - rMesh(1) ;
% Time
tSpan = linspace(0,1,10);
% Vector to store current u-value
qStore = zeros(size(rMesh));
options.Events = @(m,t,x,y) events(m,t,x,y);
% Solve
[sol,~,~,~,~] = pdepe(1,@ParticleDiffusionpde,@ParticleDiffusionic,@ParticleDiffusionbc,rMesh,tSpan,options);
% Use the events function to update qStore
function [value,isterminal,direction] = events(m,~,~,y)
qStore = y; % Value of q on rMesh
value = m; % Since m is constant, it will never be zero (no event detection)
isterminal = 0; % Continue integration
direction = 0; % Detect all zero crossings (not important)
end
function [c,f,s] = ParticleDiffusionpde(r,~,~,DqDr)
% Define the capacity, flux, and source
c = 1/Ds;
f = DqDr;
s = DqDr./r;
end
function u0 = ParticleDiffusionic(~)
u0 = q0;
end
function [pl,ql,pr,qr] = ParticleDiffusionbc(~,~,R,ur,~)
% Calculate average value of current solution
qL = qStore(1:end-1);
qR = qStore(2: end );
total = sum((qL.*rMesh(1:end-1) + qR.*rMesh(2:end))) * dr/2;
qavg = 3/R^3 * total;
% Left boundary
pl = 0;
ql = 1;
% Right boundary
qr = 1;
pr = -(kf/(Ds.*rhop)).*(Amo - Gc.*qavg - (ur/K).^n);
end
end

MeanShift Clustering on dataset

I have a numeric dataset and I want to cluster the data with a non-parametric algorithm. Basically, I would like to cluster without specifying the number of clusters as an input. I am using code from the MathWorks File Exchange that implements the Mean Shift algorithm. However, I don't know how to adapt my data to this code, as my dataset has dimensions 516 x 19.
function [clustCent,data2cluster,cluster2dataCell] =MeanShiftCluster(dataPts,bandWidth,plotFlag)
%UNTITLED2 Summary of this function goes here
% Detailed explanation goes here
%perform MeanShift Clustering of data using a flat kernel
%
% ---INPUT---
% dataPts - input data, (numDim x numPts)
% bandWidth - is bandwidth parameter (scalar)
% plotFlag - display output if 2 or 3 D (logical)
% ---OUTPUT---
% clustCent - is locations of cluster centers (numDim x numClust)
% data2cluster - for every data point which cluster it belongs to (numPts)
% cluster2dataCell - for every cluster which points are in it (numClust)
%
% Bryan Feldman 02/24/06
% MeanShift first appears in
% K. Fukunaga and L.D. Hostetler, "The Estimation of the Gradient of a
% Density Function, with Applications in Pattern Recognition"
%*** Check input ****
if nargin < 2
error('no bandwidth specified')
end
if nargin < 3
plotFlag = true;
plotFlag = false;
end
%**** Initialize stuff ***
%[numPts,numDim] = size(dataPts);
[numDim,numPts] = size(dataPts);
numClust = 0;
bandSq = bandWidth^2;
initPtInds = 1:numPts
maxPos = max(dataPts,[],2); %biggest size in each dimension
minPos = min(dataPts,[],2); %smallest size in each dimension
boundBox = maxPos-minPos; %bounding box size
sizeSpace = norm(boundBox); %indicator of size of data space
stopThresh = 1e-3*bandWidth; %when mean has converged
clustCent = []; %center of clust
beenVisitedFlag = zeros(1,numPts,'uint8'); %track if a point has been seen already
numInitPts = numPts %number of points to possibly use as initialization points
clusterVotes = zeros(1,numPts,'uint16'); %used to resolve conflicts on cluster membership
while numInitPts
tempInd = ceil( (numInitPts-1e-6)*rand) %pick a random seed point
stInd = initPtInds(tempInd) %use this point as start of mean
myMean = dataPts(:,stInd); % initialize mean to this point's location
myMembers = []; % points that will get added to this cluster
thisClusterVotes = zeros(1,numPts,'uint16'); %used to resolve conflicts on cluster membership
while 1 %loop until convergence
sqDistToAll = sum((repmat(myMean,1,numPts) - dataPts).^2); %dist squared from mean to all points still active
inInds = find(sqDistToAll < bandSq); %points within bandWidth
thisClusterVotes(inInds) = thisClusterVotes(inInds)+1; %add a vote for all the in points belonging to this cluster
myOldMean = myMean; %save the old mean
myMean = mean(dataPts(:,inInds),2); %compute the new mean
myMembers = [myMembers inInds]; %add any point within bandWidth to the cluster
beenVisitedFlag(myMembers) = 1; %mark that these points have been visited
%*** plot stuff ****
if plotFlag
figure(12345),clf,hold on
if numDim == 2
plot(dataPts(1,:),dataPts(2,:),'.')
plot(dataPts(1,myMembers),dataPts(2,myMembers),'ys')
plot(myMean(1),myMean(2),'go')
plot(myOldMean(1),myOldMean(2),'rd')
pause
end
end
%**** if mean doesn't move much stop this cluster ***
if norm(myMean-myOldMean) < stopThresh
%check for merge possibilities
mergeWith = 0;
for cN = 1:numClust
distToOther = norm(myMean-clustCent(:,cN)); %distance from possible new clust max to old clust max
if distToOther < bandWidth/2 %if it's within bandwidth/2 merge new and old
mergeWith = cN;
break;
end
end
if mergeWith > 0 % something to merge
clustCent(:,mergeWith) = 0.5*(myMean+clustCent(:,mergeWith)); %record the max as the mean of the two merged (I know, biased towards new ones)
%clustMembsCell{mergeWith} = unique([clustMembsCell{mergeWith} myMembers]); %record which points inside
clusterVotes(mergeWith,:) = clusterVotes(mergeWith,:) + thisClusterVotes; %add these votes to the merged cluster
else %its a new cluster
numClust = numClust+1 %increment clusters
clustCent(:,numClust) = myMean; %record the mean
%clustMembsCell{numClust} = myMembers; %store my members
clusterVotes(numClust,:) = thisClusterVotes;
end
break;
end
end
initPtInds = find(beenVisitedFlag == 0); %we can initialize with any of the points not yet visited
numInitPts = length(initPtInds); %number of active points in set
end
[val,data2cluster] = max(clusterVotes,[],1); %a point belongs to the cluster with the most votes
%*** If they want the cluster2data cell find it for them
if nargout > 2
cluster2dataCell = cell(numClust,1);
for cN = 1:numClust
myMembers = find(data2cluster == cN);
cluster2dataCell{cN} = myMembers;
end
end
This is the test code I am using to try and get the Mean Shift program to work:
clear
profile on
nPtsPerClust = 250;
nClust = 3;
totalNumPts = nPtsPerClust*nClust;
m(:,1) = [1 1];
m(:,2) = [-1 -1];
m(:,3) = [1 -1];
var = .6;
bandwidth = .75;
clustMed = [];
%clustCent;
x = var*randn(2,nPtsPerClust*nClust);
%*** build the point set
for i = 1:nClust
x(:,1+(i-1)*nPtsPerClust:(i)*nPtsPerClust) = x(:,1+(i-1)*nPtsPerClust:(i)*nPtsPerClust) + repmat(m(:,i),1,nPtsPerClust);
end
tic
[clustCent,point2cluster,clustMembsCell] = MeanShiftCluster(x,bandwidth);
toc
numClust = length(clustMembsCell)
figure(10),clf,hold on
cVec = 'bgrcmykbgrcmykbgrcmykbgrcmyk';%, cVec = [cVec cVec];
for k = 1:min(numClust,length(cVec))
myMembers = clustMembsCell{k};
myClustCen = clustCent(:,k);
plot(x(1,myMembers),x(2,myMembers),[cVec(k) '.'])
plot(myClustCen(1),myClustCen(2),'o','MarkerEdgeColor','k','MarkerFaceColor',cVec(k), 'MarkerSize',10)
end
title(['no shifting, numClust:' int2str(numClust)])
The test script generates random data x. In my case, I want to use a matrix D of size 516 x 19, but I am not sure how to adapt my data to this function. The function is returning results that do not agree with my understanding of the algorithm.
Does anyone know how to do this?
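As a side note, the function header above says dataPts is expected as (numDim x numPts), so a 516 x 19 matrix of 516 observations with 19 features would have to be transposed before the call. A minimal sketch (the bandwidth value is only a placeholder and has to be tuned to the scale of the features):
dataPts = D'; % transpose 516 x 19 (observations x features) to 19 x 516
bandwidth = 2; % placeholder; choose according to the spread of the features
[clustCent, data2cluster, cluster2dataCell] = MeanShiftCluster(dataPts, bandwidth, false);
numClust = size(clustCent, 2) % number of clusters found (no semicolon, so it is displayed)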