Background subtraction and foreground detection using Kalman filter - MATLAB

I need to separate the background from the foreground in a video using a Kalman filter. Can somebody give me some resources or code examples to follow?
Update: I've found a good example here: Traffic detection. It worked excellently for traffic detection, but I want to re-adapt it for people extraction. I've found some variables that need to be adapted, for example:
1. alpha = learning constant
2. K = # of Gaussians in the mixture
3. T = min. portion of background
4. initVariance = initial variance
5. pixelThresh = threshold condition for computing adaptive process on pixel
Here is an overview of the main file (in case you just want an overview):
function foregroundEstimation(movie)
%Load video into memory and prepare output video for writing
v = VideoReader(movie);
numFrames = 150;
foregroundVideo = VideoWriter('foreground.avi');
open(foregroundVideo);
%video constants
initFrame = read(v,1);
global height;
global width;
height = size(initFrame,1);
width = size(initFrame,2);
%initialize GMM parameters: (based upon Stauffer notation)
%http://www.ai.mit.edu/projects/vsam/Publications/stauffer_cvpr98_track.pdf
% alpha = learning constant
% K = # of Gaussians in the mixture
% T = min. portion of background
% initVariance = initial variance
% pixelThresh = threshold condition for computing adaptive process on pixel
alpha=0.05; % 0.05
K=5; % 5
global T; % T = 0.8
T=0.8;
global initVariance;
initVariance=75; % 75
pixelThresh=45; % 45
referenceDistance = 40; % 40 %shortcut to speed up processing time. Compare current pixel to the pixel referenceDistance frames back and skip the adaptive process if similar. Downside is it doesn't collect background evidence as well.
sideWeight = (K-1)/(1-T);
global matchThres;
matchThres = sqrt(3);
global ccThreshold;
ccThreshold = 9000; % 5000
global deltaT;
deltaT = 1;
global numParticles;
numParticles = 100;
trackingOn = 0; % will superimpose a tracking color marker on detected vehicles in the output video. trackingOn should equal 1 or 0
prevCentSize = 0;
%structures to pass information between frames for detection purposes.
field = 'f';
filterValue = {[];[];};
prevFilter = struct(field,filterValue);
modelValue = {[];prevFilter};
prevModel = struct(field,modelValue);
%initialize video process components
initColorBoxes();
foreFrame = zeros(height,width,3);
backFrame = zeros(height,width,3);
%map represents pixels at a given frame to perform adaptive process
pixThreshMap = ones(height,width);
%individual pixel process components
pixel = zeros(3,1);
pixMean = zeros(3,1,K);
pixVar = ones(1,K);
pixWeight = zeros(1,K);
%global pixel process components
globalWeight = (ones(height,width,K)/sideWeight);
globalWeight(:,:,1) = T;
%globalWeight = (ones(height,width,K)/K);
globalMean = zeros(height,width,3,K);
globalVar = ones(height,width,K);
%=====================================================
%Extract Foreground and Background by K-mixture model
%=====================================================
%initialize g-mixture model
globalVar = globalVar*initVariance;
for k=1:K
globalMean(:,:,1,k)=initFrame(:,:,1);
globalMean(:,:,2,k)=initFrame(:,:,2);
globalMean(:,:,3,k)=initFrame(:,:,3);
end;
distVec = zeros(numFrames,1);
%adaptive g-mixture background segmentation
for frameIndex=2:numFrames
%get current frame and the reference frame
%tic;
frameIndex % unsuppressed on purpose: prints progress
currentFrame = double(read(v,frameIndex));
if (frameIndex<=referenceDistance)
referenceFrame= double(read(v,1));
else
referenceFrame= double(read(v,frameIndex-referenceDistance));
end;
frameDifference = abs(currentFrame - referenceFrame);
%creates map of pixel locations where we will perform adaptive process. Based
%upon threshold that detects low change regions based on previous frame in
%order to save computation.
pixThreshMap = min(sum(+(frameDifference(:,:,:)>pixelThresh),3),1);
for index=1:3
backFrame(:,:,index)=(+(pixThreshMap(:,:)==0)).*currentFrame(:,:,index);
end;
%extract the parts considered "stable background" from current frame
%reset foreground frame
foreFrame = ones(height,width,3)*255;
%gaussian mixture matching & model updating
[i,j]=find(pixThreshMap(:,:)==1);
%loop through every pixel location where adaptive process should be performed
for k = 1:size(i,1)
pixel = reshape(currentFrame(i(k),j(k),:),3,1);
pixMean = reshape(globalMean(i(k),j(k),:,:),3,1,K);
pixVar = reshape(globalVar(i(k),j(k),:,:),1,K);
pixWeight=reshape(globalWeight(i(k),j(k),:),1,K);
%update gaussian mixture according to new pix value
match=matchingCriterion(pixMean,pixVar,pixel);
matchWeight = 0;
if(match>0)
%match found so update weights/normalize
pixWeight = (1-alpha)*pixWeight;
pixWeight(match)= pixWeight(match) + alpha;
pixWeight = pixWeight/sum(pixWeight);
matchWeight = pixWeight(1,match);
%NOTE ALPHA SHOULD BE REPLACED WITH SOME KIND OF RHO EVENTUALLY
%WHERE RHO IS PRODUCT OF ALPHA AND CONDITIONAL PROBABILITY MEASURE
%update variance
pixVar(:,match) = (1-alpha)*pixVar(:,match) + ...
alpha*(pixel - pixMean(:,:,match))'*(pixel-pixMean(:,:,match));
%update mean
pixMean(:,:,match) = (1-alpha)*pixMean(:,:,match) + alpha*pixel;
else
%no match currently found.
%replace one with lowest sigma value
rankVector = pixWeight./sqrt(pixVar(1,:));
[mini minIndex] = min(rankVector);
pixMean(:,:,minIndex) = pixel;
pixVar(:,minIndex) = initVariance;
end
%rerank all pixel components
rankCriterionVector = pixWeight./sqrt(pixVar(1,:)); % pixVar is 1-by-K, so index it as (1,:)
[rankSort rankIndex] = sort(rankCriterionVector);
pixMean = pixMean(:,:,rankIndex); % reorder along the K dimension
pixVar = pixVar(rankIndex);
pixWeight = pixWeight(rankIndex);
%repopulate global structures with updated values
globalWeight(i(k),j(k),:) = pixWeight(1,:); % store all K weights, not just the first
globalMean(i(k),j(k),:,:) = pixMean(:,1,:);
globalVar(i(k),j(k),:,:) = pixVar(:,:,:);
%now need to perform the background segmentation based upon weight
%threshold
bgIndex = segmentBackground(pixWeight);
if(ismember(matchWeight, pixWeight))
matchIndex = find(pixWeight == matchWeight,1);
else
matchIndex = 0;
end
if((matchIndex>=bgIndex) || (matchIndex == 0))
%also check against initFrame for match
%NOTE CHANGE
if(initMatch(initFrame(i(k),j(k),:),pixel) == 0)
foreFrame(i(k),j(k),:) = pixel;
end
end
end
%Now write foreground frame to foreground estimation video
contrastFrame = foreFrame/max(abs(foreFrame(:)));
%remove all connected components associated with foreground objects that are smaller than what we expect a vehicle should be.
[cleanFrame centroids]= connectedComponentCleanup(contrastFrame);
if(trackingOn == 1)
if(size(centroids,1) > prevCentSize)
prevModel = addModel(prevModel, centroids, height, width);
elseif (size(centroids,1) < prevCentSize)
prevModel = removeModel(prevModel, centroids, height, width);
end
if(size(centroids,1) > 0)
%implies there is a car in frame for tracking
[curModel orderedCentroids] = vehicleTracking(centroids, prevModel);
prevModel = curModel;
trackFrame = colorBox(cleanFrame, curModel,orderedCentroids, height, width);
else
trackFrame = cleanFrame;
end
else
trackFrame = cleanFrame;
end
writeVideo(foregroundVideo,trackFrame);
prevCentSize = size(centroids,1);
end
Thanks.

Basically this is not a task for a Kalman filter; look at background subtraction in OpenCV.
A Kalman filter could be used after that to track objects between frames. You could look, for example, here:
http://studentdavestutorials.weebly.com/kalman-filter-with-matlab-code.html
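If you only need the subtraction step in MATLAB, the Computer Vision Toolbox ships a GMM-based detector that does essentially what the code above implements by hand. A minimal sketch, assuming that toolbox is installed; the file name and all parameter values are placeholders to tune:
v = VideoReader('people.avi'); % placeholder file name
detector = vision.ForegroundDetector('NumGaussians',5,'NumTrainingFrames',50,'LearningRate',0.005);
blobber = vision.BlobAnalysis('MinimumBlobArea',200); % reject blobs smaller than a person
while hasFrame(v)
frame = readFrame(v);
fgMask = step(detector, frame); % logical foreground mask
fgMask = imopen(fgMask, strel('square',3)); % remove speckle noise
[~, ~, boxes] = step(blobber, fgMask); % per-blob bounding boxes
if ~isempty(boxes)
frame = insertShape(frame, 'Rectangle', boxes, 'Color', 'green');
end
imshow(frame); drawnow;
end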

Related

What is a better way to change the percentages of the training and testing sets during the splitting process?

Using the PCA technique and the Yale database, I'm trying to work on face recognition in MATLAB by randomly splitting the data into 20% for training and 80% for testing. It gives an
Index in position 2 exceeds array bounds (must not exceed 29)
error. The following is the code; I'm hoping to get help:
dataset = load('yale_FaceDataset.mat');
trainSz = round(dataset.samples*0.2);
testSz = round(dataset.samples*0.8);
trainSetCell = cell(1,trainSz*dataset.classes);
testSetCell = cell(1,testSz*dataset.classes);
j = 1;
k = 1;
m = 1;
for i = 1:dataset.classes
% training set
trainSetCell(k:k+trainSz-1) = dataset.images(j:j+trainSz-1);
trainLabels(k:k+trainSz-1) = dataset.labels(j:j+trainSz-1);
k = k+trainSz;
% test set
testSetCell(m:m+testSz-1) = dataset.images(j+trainSz:j+dataset.samples-1);
testLabels(m:m+testSz-1) = dataset.labels(j+trainSz:j+dataset.samples-1);
m = m+testSz;
j = j+dataset.samples;
end
% convert the data from a cell into a matrix format
numImgs = length(trainSetCell);
trainSet = zeros(numImgs,numel(trainSetCell{1}));
for i = 1:numImgs
trainSet(i,:) = reshape(trainSetCell{i},[],1);
end
numImgs = length(testSetCell);
testSet = zeros(numImgs,numel(testSetCell{1}));
for i = 1:numImgs
testSet(i,:) = reshape(testSetCell{i},[],1);
end
%% applying PCA
% compute the mean face
mu = mean(trainSet)';
% centre the training data
trainSet = trainSet - (repmat(mu,1,size(trainSet,1)))';
% generate the eigenfaces(features of the training set)
eigenfaces = pca(trainSet);
% set the number of principal components
Ncomponents = 100;
% Out of the generated components, we keep "Ncomponents"
eigenfaces = eigenfaces(:,1:Ncomponents);
% generate training features
trainFeatures = eigenfaces' * trainSet';
% Subspace projection
% centre features
testSet = testSet - (repmat(mu,1,size(testSet,1)))';
% subspace projection
testFeatures = inv(eigenfaces'*eigenfaces) * eigenfaces' * testSet';
mdl = fitcdiscr(trainFeatures',trainLabels);
labels = predict(mdl,testFeatures');
% find the images that were recognised and their respect. labels
correctRec = find(testLabels == labels');
correctLabels = labels(correctRec);
% find the images that were NOT recognised and their respect. labels
falseRec = find(testLabels ~= labels');
falseLabels = labels(falseRec);
% compute and display the recognition rate
result = length(correctRec)/length(testLabels)*100;
fprintf('The recognition rate is: %0.3f \n',result);
% divide the images into : recognised and unrecognised
correctTest = testSetCell(correctRec);
falseTest = testSetCell(falseRec);
% display some recognised samples and their respective labels
imgshow(correctTest(1:8),correctLabels(1:8));
% display all unrecognised samples and their respective labels
imgshow(falseTest(1:length(falseTest)), falseLabels(1:length(falseTest)));
It would be nice if you also provided the line number and the full message of the error, and if you stripped your code down to the essentials. I guess the PCA stuff is not necessary here, as the error is probably raised in your loop: you increment j by j = j+dataset.samples; and use it in the next iteration for the indexing j:j+trainSz-1, which must eventually exceed the array bounds.
Nevertheless, there is no randomness in that indexing. It is easiest to use the built-in cvpartition function:
% split data
cvp = cvpartition(Lbl,'HoldOut',.2);
lgTrn = cvp.training;
lgTst = cvp.test;
You may provide the number of observations as the first input, or the actual class vector (Lbl in this case) to let cvpartition pick random subsets that reflect the original distribution of the individual classes.
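A sketch of how this could replace the manual loop, assuming dataset.images is a cell array with one label per image in dataset.labels, as the code in the question suggests (the question wants 20% for training, so 80% is held out):
Lbl = dataset.labels(:);
cvp = cvpartition(Lbl,'HoldOut',0.8); % hold out 80% of the data for testing
trainSetCell = dataset.images(cvp.training);
trainLabels = Lbl(cvp.training);
testSetCell = dataset.images(cvp.test);
testLabels = Lbl(cvp.test);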

Spheroids detection from images

I need to find the blobs in the image below.
The major problem is the background: it doesn't have uniform intensity. I tried a couple of things like thresholding and edge detection in MATLAB but wasn't able to find a good way to segment out all the spheroids. I need to extract the blobs and find the area of each blob. Does anyone know how to work around this kind of background?
Edit (07/02/17):
As suggested by Spektre, I tried the following in MATLAB.
Method 1:
img_original = imread('~/my_image.jpg'); %Read image
img_ch = single(img_original(:,:,2)); %Pick one channel( here its green)
g = fspecial('gaussian',200,100); %Gaussian kernel used to blur the image
con_img = conv2(img_ch,g,'same'); %2D convolution; this blurs the image
sub_img = (con_img - img_ch); %Simple matrix subtraction
sub_img(sub_img <= 10) = 0; %Thresholding
sub_img(sub_img ~= 0) = 1;
fil_sub = imfill(sub_img,'holes'); %Fill the regions
imgfilt = imfilter(fil_sub, ones(3)); %run filter using 3by3 matrx
imgfilt(imgfilt < 8) = 0; %Reduce noisy pixels by thresholding
mfilt_img = (medfilt2(imgfilt)); %reduce unwanted pixels
img = img_ch;
img(mfilt_img ~= 0) = 255;
img2 = img_ch;
img2(img2 < 70) = 0; %Threshold for darker pixels that are left out by the method above.
img(img2 ==0) = 255;
disp_img = img_original(:,:,1);
disp_img(img ==255) = 255;
img_original(:,:,1) = disp_img;
figure, imshow(img_original)
I got the segments, but they are still not good enough, I think. This method gave good segments where the background intensity is high; even if I reduce the threshold value, the segments are not clear on the darker background, and the brightest pixels in the blobs are excluded.
Method 2:
img_original = imread('~/cancer_cells/Snap-10234.jpg'); %Read image
img_ch = single(img_original(:,:,2)); %Pick one channel( here its green)
clear new_matcel cur_img matcel avg_matrx
s=3; % Set size of the square window
mat_img = img_ch; % Working image channel
% resize the working matrix so that the dimensions match
resize_img = resizem(mat_img,round(size(mat_img)/s)*s);
% convert matrix into small s x s matrix and save each in cells
window_c = ones(1,size(resize_img,1)/s) * s;
window_r = ones(1,size(resize_img,2)/s) * s;
mat_cel = mat2cell(resize_img,window_c,window_r);
new_matcel = cell(size(mat_cel)); % initialize new variable
% get the average value for each window and replace the actual by avg value
for i = 1:size(mat_cel,1)
for j = 1:size(mat_cel,2)
cur_img = mat_cel{i,j};
avg_value = mean(mean(cur_img));
new_matcel{i,j} = ones(s) * avg_value;
end
end
avg_matrx = cell2mat(new_matcel); % convert cells to matrix
image_sub = (abs(resize_img - avg_matrx)); % take the absolute difference
image_sub(image_sub < 7) = 0; % thresholding
image_sub(image_sub~=0) = 1;
image_sub = bwmorph(image_sub,'bridge');% fill gaps
image_sub = imfill(image_sub,'holes'); % fill the bounded regions
% image_sub(image_sub == 1) = 255;
image_sub = resizem(image_sub,size(img_ch)); % resize to original size
disp_img = img_original(:,:,1);
disp_img(image_sub == 1) = 255;
img_original(:,:,1) = disp_img;
figure, imshow(img_original)
A much better segmented image:
Even the brighter pixels are included in the segments. Thanks to Spektre.
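For the per-blob area measurement mentioned at the top of the question, a minimal sketch built on this result, assuming image_sub is the final binary mask from Method 2 (Image Processing Toolbox):
cc = bwconncomp(logical(image_sub)); % label connected blobs
stats = regionprops(cc, 'Area', 'Centroid'); % per-blob measurements
areas = [stats.Area]; % vector of blob areas in pixels
fprintf('Found %d blobs, mean area %.1f px\n', cc.NumObjects, mean(areas));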
Is there a way to improve the above code, or any other idea to get more precise segments?
Thanks.

How to improve the OCR accuracy rate of a neural network in MATLAB

I'm working on OCR for Arabic characters. I want to try GLCM as a feature extraction method. I've got the code here: http://www.mathworks.com/matlabcentral/fileexchange/22187-glcm-texture-features
Example of input images (character images):
and I've made code to get the GLCM output based on the needed features. Here it is:
function features = EkstraksiFitur_GLCM(x)
glcm = graycomatrix(x,'offset',[0 1; -1 1; -1 0; -1 -1], 'NumLevels', 2);
stats = GLCM_Features1(glcm, 0);
% each statistic: take the mean over the offsets and guard against NaN
autocorrelation = double(mean(stats.autoc));
if isnan(autocorrelation), autocorrelation = 0; end
contrast = double(mean(stats.contr));
if isnan(contrast), contrast = 0; end
Correlation = double(mean(stats.corrm));
if isnan(Correlation), Correlation = 0; end
ClusterProminence = double(mean(stats.cprom));
if isnan(ClusterProminence), ClusterProminence = 0; end
ClusterShade = double(mean(stats.cshad));
if isnan(ClusterShade), ClusterShade = 0; end
Dissimilarity = double(mean(stats.dissi));
if isnan(Dissimilarity), Dissimilarity = 0; end
Energy = double(mean(stats.energ));
if isnan(Energy), Energy = 0; end
.
.
.
features=[autocorrelation, contrast, Correlation, Dissimilarity, Energy, Entropy, Homogeneity, MaximumProbability, SumAverage, SumVariance, SumEntropy, DifferenceVariance, DifferenceEntropy, InverseDifferenceMomentNormalized];
Using a loop to get the features of all the images (the training data):
srcFile = dir('D:\1. Thesis FINISH!!!\Data set\0 Well Segmented Character\Advertising Bold 24\datatrain\*.png');
fetrain = [];
for a = 1:length(srcFile)
file_name = strcat('D:\1. Thesis FINISH!!!\Data set\0 Well Segmented Character\Advertising Bold 24\datatrain\',srcFile(a).name); % index with the loop variable a (b was undefined)
A = imread(file_name);
[gl] = EkstraksiFitur_GLCM2 (A);
[fiturtrain] = reshape (gl, [56,1]) ;
fetrain = [fetrain fiturtrain];
% vectorname = strcat(file_name,'_array.mat');
end
save ('fetrain.mat','fetrain');
That gives me the features.
I then run the training process using a neural network, but I get a very low accuracy rate. This is the code:
% clc;clear;close all;
% function net1 = pelatihan (input, target)
net = newff(fetrain,target,[10 2],{'tansig','tansig'},'trainscg');
% net.trainParam.mem_reduc = 2;
net.performFcn = 'mse';
net.divideFcn = 'dividetrain';
% [trainInd,valInd,testInd] = dividetrain(601);
net.trainParam.show = 10; % Frequency of progress displays (in epochs).
net.trainParam.epochs = 1000; %default 1000
net.trainParam.goal = 1e-6;
net = train(net,fetrain,target);
output = round(sim(net,fetrain));
save net1.mat net
% net2 = output;
data = fetest;
[target; output];
prediksi = round(sim (net, data));
[targetx; prediksi];
%% Calculate the accuracy %
y = 0; % y counts correctly classified samples (starting at 1 inflates the rate)
j = size(prediksi, 2); % j is the total number of samples
for i = 1:j
if prediksi(i) == targetx(i)
y = y + 1;
end
end
s = 'The accuracy is %.2f%%';
acc = 100 *(y/j);
sprintf (s,acc)
I've tried several times, but the accuracy rate (the NN test result) doesn't improve; it constantly gives an output of 1.96%. Is there something wrong with the process flow, or with the code I've made?
Any help would be very helpful and appreciated.
First, I can see from the features you extracted that they are not normalized and they vary in range, which means some of the features will dominate the rest. Try to normalize or standardize the features. Is the accuracy measured on the training set only, or do you use a test set or cross-validation? Is it true, as I see, that you are using 601 features? Did you try feature selection methods to decide which features fit the data and the model best?
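For the standardization point, a minimal sketch, assuming fetrain and fetest are laid out features-by-samples as they are built in the question (bsxfun is used so it also runs on older releases):
mu = mean(fetrain, 2); % per-feature mean over the training samples
sd = std(fetrain, 0, 2); % per-feature standard deviation
sd(sd == 0) = 1; % guard against constant features
fetrainN = bsxfun(@rdivide, bsxfun(@minus, fetrain, mu), sd); % z-score the training set
fetestN = bsxfun(@rdivide, bsxfun(@minus, fetest, mu), sd); % apply the SAME statistics to the test set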
Second, I would like to know what network structure you are using, instead of having to read the full code to understand what you have done.
Third, it would be interesting to look at the input images to understand the environment you are dealing with.

Saving a series of manually selected points

Just a quick question. I have generated a piece of code whereby I select the point of interest in every frame. However, only the coordinates of the last selected point are saved. Does anyone know how to set up the code such that the points for all frames are saved, for example in a text file with the x position in column 1 and the y position in column 2? Here is the code I have developed so far:
clear;
clc;
%% Video file information
obj = VideoReader('T9_720p_60p_60mm_f5.MOV');
%% Sampling rate
fps = get(obj, 'FrameRate');
dt = 1/fps;
%% Image Information
file_info = get(obj);
image_width = file_info.Width;
image_height = file_info.Height;
%%Desired image size
x_range = 1:image_height;
y_range = 1:image_width;
szx = length(x_range);
szy = length(y_range);
%%Image processing - Point selection
for n = 33:115
frame = read(obj,n);
imshow(frame);
hpoint = impoint(gca, []);
Position = getPosition(hpoint);
end
I just realized all I needed was to add the following before the end of the loop (indexing with an offset instead of reassigning the loop variable n, which behaves the same but is cleaner):
%% Save data
data(n-32,:) = Position; % offset the row index so rows start at 1 (frames start at 33)
end
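To also write the requested two-column text file once the loop has finished, one option is a sketch like this ('points.txt' is a placeholder name; getPosition returns [x y], so column 1 is x and column 2 is y):
dlmwrite('points.txt', data, 'delimiter', '\t'); % x in column 1, y in column 2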
Regards

Code for peak detection

In the first step, I want to detect whether a real-time signal passes over and under some thresholds, in order to detect a peak in the signal. My MATLAB code:
k=1;
t = 1;
l=1;
for i = 1:length(sm) % sm is my signal
if (sm(i) > 0.25)
first(k) = i;
k = k+1;
if (sm(i) > 0.5)
second(t) = i;
t =t +1;
if (sm(i) > 0.75)
third(l) = i;
l = l+1;
end
end
end
end
Example:
(image: a signal with three main peaks crossing the thresholds)
I want to calculate the times at which the signal passes over and under the three thresholds 0.25, 0.5, and 0.75, and to return those windows: basically the three main peaks in my example.
Basically, what I am trying to do is use the fastsmooth function and findpeaks:
signalSmoothed = fastsmooth(sm,50); plot(signalSmoothed)
[max_pk1 max_pk2] = findpeaks(signalSmoothed);
find(max_pk1 >0.5)
inversex = 1.01*max(signalSmoothed) - signalSmoothed;
[min_pk1 min_pk2] = findpeaks(inversex);
find(min_pk1 >0.5)
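(As a side note, findpeaks can apply such restrictions directly instead of a separate find call; a sketch assuming the Signal Processing Toolbox, with the prominence value a guess to tune:)
[pks, locs] = findpeaks(signalSmoothed, 'MinPeakHeight', 0.5, 'MinPeakProminence', 0.1); % keep only sufficiently tall, distinct peaks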
What heuristics should I use to take only the desired peaks? Moreover, the depicted image is an offline example; in general, I want to perform the technique online.
EDIT: I wrongly described my desired result as a peak; it is the whole wave, not just the maximum value.
Here is a solution to get the points where the signal sm passes the thresholds 0.25, 0.50 and 0.75. The points can be converted into windows inside the data range and stored in W. Finally, we can plot them easily in the same figure. Note that we need some checks in the local function getwindows to handle special cases, for example when a window starts outside the data range. The detection of windows inside another window is done in the getwindowsspecial function.
Here is the code:
function peakwindow
% generate sample data
rng(7);
sm = 2*rand(1,25)-0.5;
sm = interp1(1:length(sm),sm,1:0.01:length(sm)); % upsample by a factor of 100 (querying past length(sm) would only produce NaNs)
% get points
firstup = find(diff(sm > 0.25)==1);
secondup = find(diff(sm > 0.50)==1);
thirdup = find(diff(sm > 0.75)==1);
firstdown = find(diff(sm < 0.25)==1);
seconddown = find(diff(sm < 0.50)==1);
thirddown = find(diff(sm < 0.75)==1);
% plot the result
figure; hold on;
plot(sm,'k')
plot(firstup,sm(firstup),'*')
plot(firstdown,sm(firstdown),'*')
plot(secondup,sm(secondup),'*')
plot(seconddown,sm(seconddown),'*')
plot(thirdup,sm(thirdup),'*')
plot(thirddown,sm(thirddown),'*')
% get windows
W1 = getwindows(firstup,firstdown);
W2 = getwindows(secondup,seconddown);
W3 = getwindows(thirdup,thirddown);
% get special window
WS = getwindowsspecial(W1,W3);
% plot windows
plotwindow(W1,0.25,'r');
plotwindow(W2,0.50,'r');
plotwindow(W3,0.75,'r');
plotwindow(WS,0,'b-');
function W = getwindows(up,down)
if length(up)>1 && length(down)>1 && up(1)>down(1)
down(1)=[]; % handle case when window begins out of bounds left
end
if isempty(up) || isempty(down)
W = []; % handle if no complete window present
else
% concatenate and handle case when a window ends out of bounds right
W = [up(1:length(down));down]';
end
function plotwindow(W,y,lspec)
for i = 1:size(W,1)
plot(W(i,:),[y,y],lspec)
end
% get windows of U where there is a window of H inside
function W = getwindowsspecial(U,H)
W = []; % empty matrix to begin with
for i = 1:size(U,1) % for all windows in U
if any(H(:,1)>=U(i,1) & H(:,1)<=U(i,2))
W = [W;U(i,:)]; % add window
end
end
This is the result:
To see that the handling works properly, we can plot the result when initialized with rng(3):
Note that the windows at 0.25 and 0.50 would start out of bounds on the left and are therefore not present in the plotted windows.
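For the online case mentioned in the question, the same crossing test can be applied incrementally, one sample at a time. A minimal sketch, with the smoothed vector sm standing in for whatever real-time source delivers your samples:
thresholds = [0.25 0.50 0.75];
prev = sm(1);
for n = 2:numel(sm) % in a real system, read one new sample per pass
cur = sm(n);
up = find(prev <= thresholds & cur > thresholds); % upward crossings
down = find(prev >= thresholds & cur < thresholds); % downward crossings
for k = up, fprintf('sample %d: rose above %.2f\n', n, thresholds(k)); end
for k = down, fprintf('sample %d: fell below %.2f\n', n, thresholds(k)); end
prev = cur;
end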