I'm doing a project on tracking and object classification in video surveillance, and I'm having some difficulty removing shadows from the foreground objects.
The problem is that I can't just use any method I want; I have to use the method described in the article Shadow removal with blob based morphological reconstruction.
In the article they use a hybrid shadow removal method: RGB colour-based detection and texture-based detection. Both procedures run in parallel, followed by an assertion process that combines the results of the two.
So I would like some help with the shadow removal MATLAB code.
My code so far:
function Tracking_Objects()
% Create System objects used for reading video, detecting moving objects,
% and displaying the results.
obj = setupSystemObjects();
tracks = initializeTracks(); % Create an empty array of tracks.
nextId = 1; % ID of the next track
% Detect moving objects, and track them across video frames.
while ~isDone(obj.reader)
frame = readFrame();
[centroids, bboxes, mask] = detectObjects(frame);
predictNewLocationsOfTracks();
[assignments, unassignedTracks, unassignedDetections] = ...
detectionToTrackAssignment();
updateAssignedTracks();
updateUnassignedTracks();
deleteLostTracks();
createNewTracks();
displayTrackingResults();
end
%% Create System Objects
function obj = setupSystemObjects()
% Initialize Video I/O
% Create objects for reading a video from a file, drawing the tracked
% objects in each frame, and playing the video.
% Create a video file reader.
obj.reader = vision.VideoFileReader('input.avi');
% Create two video players, one to display the video,
% and one to display the foreground mask.
obj.videoPlayer = vision.VideoPlayer('Position', [10, 250, 700, 400]);
obj.maskPlayer = vision.VideoPlayer('Position', [720, 250, 700, 400]);
obj.detector = vision.ForegroundDetector('NumGaussians', 3, ...
'NumTrainingFrames', 40, 'MinimumBackgroundRatio', 0.7);
obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, ...
'AreaOutputPort', true, 'CentroidOutputPort', true, ...
'MinimumBlobArea', 75);
end
%% Initialize Tracks
function tracks = initializeTracks()
% create an empty array of tracks
tracks = struct(...
'id', {}, ...
'bbox', {}, ...
'kalmanFilter', {}, ...
'age', {}, ...
'totalVisibleCount', {}, ...
'consecutiveInvisibleCount', {});
end
%% Read a Video Frame
% Read the next video frame from the video file.
function frame = readFrame()
frame = obj.reader.step();
end
%% Detect Objects
function [centroids, bboxes, mask] = detectObjects(frame)
% Detect foreground.
mask = obj.detector.step(frame);
% Apply morphological operations to remove noise and fill in holes.
mask = imopen(mask, strel('rectangle', [3,3]));
mask = imclose(mask, strel('rectangle', [15, 15]));
mask = imfill(mask, 'holes');
% Perform blob analysis to find connected components.
[~, centroids, bboxes] = obj.blobAnalyser.step(mask);
end
%% Predict New Locations of Existing Tracks
function predictNewLocationsOfTracks()
for i = 1:length(tracks)
bbox = tracks(i).bbox;
% Predict the current location of the track.
predictedCentroid = predict(tracks(i).kalmanFilter);
% Shift the bounding box so that its center is at
% the predicted location.
predictedCentroid = int32(predictedCentroid) - bbox(3:4) / 2;
tracks(i).bbox = [predictedCentroid, bbox(3:4)];
end
end
%% Assign Detections to Tracks
function [assignments, unassignedTracks, unassignedDetections] = ...
detectionToTrackAssignment()
nTracks = length(tracks);
nDetections = size(centroids, 1);
% Compute the cost of assigning each detection to each track.
cost = zeros(nTracks, nDetections);
for i = 1:nTracks
cost(i, :) = distance(tracks(i).kalmanFilter, centroids);
end
% Solve the assignment problem.
costOfNonAssignment = 20;
[assignments, unassignedTracks, unassignedDetections] = ...
assignDetectionsToTracks(cost, costOfNonAssignment);
end
%% Update Assigned Tracks
function updateAssignedTracks()
numAssignedTracks = size(assignments, 1);
for i = 1:numAssignedTracks
trackIdx = assignments(i, 1);
detectionIdx = assignments(i, 2);
centroid = centroids(detectionIdx, :);
bbox = bboxes(detectionIdx, :);
% Correct the estimate of the object's location
% using the new detection.
correct(tracks(trackIdx).kalmanFilter, centroid);
% Replace predicted bounding box with detected
% bounding box.
tracks(trackIdx).bbox = bbox;
% Update track's age.
tracks(trackIdx).age = tracks(trackIdx).age + 1;
% Update visibility.
tracks(trackIdx).totalVisibleCount = ...
tracks(trackIdx).totalVisibleCount + 1;
tracks(trackIdx).consecutiveInvisibleCount = 0;
end
end
%% Update Unassigned Tracks
% Mark each unassigned track as invisible, and increase its age by 1.
function updateUnassignedTracks()
for i = 1:length(unassignedTracks)
ind = unassignedTracks(i);
tracks(ind).age = tracks(ind).age + 1;
tracks(ind).consecutiveInvisibleCount = ...
tracks(ind).consecutiveInvisibleCount + 1;
end
end
%% Delete Lost Tracks
function deleteLostTracks()
if isempty(tracks)
return;
end
invisibleForTooLong = 20;
ageThreshold = 8;
% Compute the fraction of the track's age for which it was visible.
ages = [tracks(:).age];
totalVisibleCounts = [tracks(:).totalVisibleCount];
visibility = totalVisibleCounts ./ ages;
% Find the indices of 'lost' tracks.
lostInds = (ages < ageThreshold & visibility < 0.6) | ...
[tracks(:).consecutiveInvisibleCount] >= invisibleForTooLong;
% Delete lost tracks.
tracks = tracks(~lostInds);
end
%% Create New Tracks
function createNewTracks()
centroids = centroids(unassignedDetections, :);
bboxes = bboxes(unassignedDetections, :);
for i = 1:size(centroids, 1)
centroid = centroids(i,:);
bbox = bboxes(i, :);
% Create a Kalman filter object.
kalmanFilter = configureKalmanFilter('ConstantVelocity', ...
centroid, [200, 50], [100, 25], 100);
% Create a new track.
newTrack = struct(...
'id', nextId, ...
'bbox', bbox, ...
'kalmanFilter', kalmanFilter, ...
'age', 1, ...
'totalVisibleCount', 1, ...
'consecutiveInvisibleCount', 0);
% Add it to the array of tracks.
tracks(end + 1) = newTrack;
% Increment the next id.
nextId = nextId + 1;
end
end
%% Display Tracking Results
% The |displayTrackingResults| function draws a bounding box and label ID
% for each track on the video frame and the foreground mask. It then
% displays the frame and the mask in their respective video players.
function displayTrackingResults()
% Convert the frame and the mask to uint8 RGB.
frame = im2uint8(frame);
mask = uint8(repmat(mask, [1, 1, 3])) .* 255;
minVisibleCount = 8;
if ~isempty(tracks)
% Noisy detections tend to result in short-lived tracks.
% Only display tracks that have been visible for more than
% a minimum number of frames.
reliableTrackInds = ...
[tracks(:).totalVisibleCount] > minVisibleCount;
reliableTracks = tracks(reliableTrackInds);
% Display the objects. If an object has not been detected
% in this frame, display its predicted bounding box.
if ~isempty(reliableTracks)
% Get bounding boxes.
bboxes = cat(1, reliableTracks.bbox);
% Get ids.
ids = int32([reliableTracks(:).id]);
% Create labels for objects indicating the ones for
% which we display the predicted rather than the actual
% location.
labels = cellstr(int2str(ids'));
predictedTrackInds = ...
[reliableTracks(:).consecutiveInvisibleCount] > 0;
isPredicted = cell(size(labels));
isPredicted(predictedTrackInds) = {' predicted'};
labels = strcat(labels, isPredicted);
% Draw the objects on the frame.
frame = insertObjectAnnotation(frame, 'rectangle', ...
bboxes, labels);
% Draw the objects on the mask.
mask = insertObjectAnnotation(mask, 'rectangle', ...
bboxes, labels);
end
end
% Display the mask and the frame.
obj.maskPlayer.step(mask);
obj.videoPlayer.step(frame);
end
end
It would appear that you need to modify the detectObjects function to eliminate shadows. Currently, all it does is take a foreground mask from background subtraction (vision.ForegroundDetector), remove some noise and fill in small gaps using morphology, and then find connected components, a.k.a. "blobs". Typically, the blobs do include shadows.
If your shadow removal algorithm operates on a single frame, then this is where the code for it would go.
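For illustration, here is a minimal sketch of the colour-based half of such a hybrid detector, meant to be called from detectObjects right after obj.detector.step. It assumes you maintain your own background estimate (for example a running average, as in the later posts); removeShadows and the two ratio thresholds are hypothetical names and values, not the paper's exact formulation.
function mask = removeShadows(frame, mask, background)
% Sketch of an RGB ratio test for shadow pixels (assumed thresholds).
lowRatio  = 0.4;   % assumed lower bound on frame/background brightness ratio
highRatio = 0.9;   % assumed upper bound (a shadow is darker, not equal)
ratio = double(frame) ./ max(double(background), eps);
% A pixel is treated as shadow if it is darker than the background by a
% similar factor in all three colour channels (chromaticity roughly preserved).
isShadow = all(ratio > lowRatio & ratio < highRatio, 3);
mask = mask & ~isShadow;   % keep only the non-shadow foreground
end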
I think your problem is in your object detection. Your code is using imopen and imclose, but I think you want to use imdilate and imerode.
%% Detect Objects
function [centroids, bboxes, mask] = detectObjects(frame)
% Detect foreground.
mask = obj.detector.step(frame);
% Apply morphological operations to remove noise and fill in holes.
%this makes the white space smaller to remove small dots/noise
mask = imerode(mask, strel('rectangle', [3,3]));
%this joins nearby white regions and fills gaps (dilation followed by erosion)
mask = imclose(mask, strel('rectangle', [15, 15]));
mask = imfill(mask, 'holes');
% Perform blob analysis to find connected components.
[~, centroids, bboxes] = obj.blobAnalyser.step(mask);
end
I changed your morphological operations; you may need to change your structuring elements as well. If you look at the MATLAB documentation, imopen is the same as erosion followed by dilation: imopened = imdilate(imerode(im,se),se).
imclose is dilation followed by erosion: imclosed = imerode(imdilate(im,se),se).
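You can verify both decompositions yourself on any binary mask; this is just a sanity check, not part of the tracking code:
bw = rand(100) > 0.5;                % arbitrary binary test mask
se = strel('rectangle', [15 15]);
isequal(imopen(bw, se),  imdilate(imerode(bw, se), se))   % returns true
isequal(imclose(bw, se), imerode(imdilate(bw, se), se))   % returns true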
The paper uses an erosion followed by a dilation (an imopen operation), so that is what I put in the code. If you want to use the same structuring element for both operations, you can change
%this makes the white space smaller to remove small dots/noise
mask = imerode(mask, strel('rectangle', [3,3]));
%this joins nearby white regions and fills gaps (dilation followed by erosion)
mask = imclose(mask, strel('rectangle', [15, 15]));
to
mask = imopen(mask,strel('rectangle',[15 15]));
Since you didn't include your pictures I can't try it out, but I think it should work after you experiment with the strel sizes.
Related
I am trying to remove moving shadows in video using the stationary wavelet transform technique, as mentioned in the reference paper. I coded it in MATLAB but am not getting the expected output. I am a beginner in MATLAB, so can anyone please review my code to check whether it follows all the steps mentioned in the paper?
Recap of the algorithm followed in the reference paper:
(i) Convert the RGB video frame to HSV.
(ii) Split the HSV image into its individual components.
(iii) Find the absolute difference between the H, S and V components of the current frame and the background frame.
(iv) Apply the SWT transformation to the difference of the S and V components.
(v) Compute the skewness value for the SWT output.
(vi) For shadow detection: if the SWT output of V is greater than its skewness, assign those pixels the value 1, else 0.
(vii) For shadow removal: a thresholding operation is applied based on the SWT output of S (see the reference paper).
function shadowremoval()
obj = setupSystemObjects();
while ~isDone(obj.reader)
frame = readFrame();
mask1 = shadow(frame);
displayTrackingResults();
end
%% Create System Objects
function obj = setupSystemObjects()
% Create a video file reader.
obj.reader = vision.VideoFileReader('visiontraffic.avi');
% Create two video players, one to display the video,
% and one to display the foreground mask.
obj.videoPlayer = vision.VideoPlayer('Position', [10, 250, 700, 400]);
obj.maskPlayer = vision.VideoPlayer('Position', [720, 250, 700, 400]);
obj.detector = vision.ForegroundDetector('NumGaussians', 3, ...
'NumTrainingFrames', 40, 'MinimumBackgroundRatio', 0.7);
end
%% Read a Video Frame
% Read the next video frame from the video file.
function frame = readFrame()
frame = obj.reader.step();
end
%% Perform the operation to remove shadows
function mask1 = shadow(frame)
Background=0.0;
% Detect foreground.
mask1 = obj.detector.step(frame);
mask1 = uint8(repmat(mask1, [1, 1, 3])) .* 255;
% Apply morphological operations to remove noise and fill in holes.
% mask1 = imerode(mask1, strel('rectangle', [3,3]));
% mask1 = imclose(mask1, strel('rectangle', [15, 15]));
mask1 = imopen(mask1, strel('rectangle', [15,15]));
mask1 = imfill(mask1, 'holes');
% Now let's do the differencing
alpha = 0.5;
if frame == 1
Background = frame;
else
% Change background slightly at each frame
% Background(t+1)=(1-alpha)*I+alpha*Background
Background = (1-alpha)* frame + alpha * Background;
end
% Do color conversion from rgb to hsv
x=rgb2hsv(mask1);
y=rgb2hsv(Background);
% Split the hsv component to h,s,v value
Hx = x(:,:,1);
Sx = x(:,:,2);
Vx = x(:,:,3);
Hy = y(:,:,1);
Sy = y(:,:,2);
Vy = y(:,:,3);
% Calculate a difference between this frame and the background.
dh=(abs(double(Hx) - double(Hy)));
ds1=(abs(double(Sx) - double(Sy)));
dv1=(abs(double(Vx) - double(Vy)));
% Perform the 'swt'
[as,hs,vs,ds] = swt2(ds1,1,'haar');
[av,hv,vv,dv] = swt2(dv1,1,'haar');
%Compute the skewness value of 'swt of v'
sav=skewness(av(:));
shv=skewness(hv(:));
svv=skewness(vv(:));
sdv=skewness(dv(:));
%Compute the skewness value of 'swt of s'
sas=skewness(as(:));
shs=skewness(hs(:));
svs=skewness(vs(:));
sds=skewness(ds(:));
%Perform the thresholding operation
b=(av>=sav);
c=(hv>=shv);
d=(vv>=svv);
e=(dv>=sdv);
f=(as>=sas);
g=(hs>=shs);
h=(vs>=svs);
i=(ds>=sds);
j=(b&f);
k=(c&g);
l=(d&h);
m=(e&i);
%Perform the inverse 'swt'operation
recv = iswt2(b,c,d,e,'haar');
recs= iswt2(j,k,l,m,'haar');
de_shadow=cat(3,dh,recs,recv);
mask1=hsv2rgb(de_shadow);
mask1=rgb2gray(mask1);
end
function displayTrackingResults()
% Convert the frame and the mask to uint8 RGB.
frame = im2uint8(frame);
mask1 = uint8(repmat(mask1, [1, 1, 3])) .* 255;
% Display the mask and the frame.
obj.maskPlayer.step(mask1);
obj.videoPlayer.step(frame);
end
end
I wish to remove moving shadows in video. I followed the steps mentioned in this article, but I get the same result before and after applying the thresholding operation on the SWT output, so I am not getting the expected result. Can anyone suggest what I am doing wrong?
Steps for shadow removal:
(i) Read the video.
(ii) After the colour conversion, split the H, S and V components.
(iii) Apply the stationary wavelet transform to the S and V components of the frame.
(iv) Calculate the skewness value for the respective SWT outputs of the S and V components.
(v) Assign the value 1 or 0 to the S and V pixels depending on whether the SWT of V is greater than the skewness value of V, and likewise for S.
(vi) Do the inverse SWT over S and V.
(vii) Combine the H, S and V components.
clc; % Clear the command window.
close all; % Close all figures (except those of imtool.)
imtool close all; % Close all imtool figures.
clear; % Erase all existing variables.
workspace; % Make sure the workspace panel is showing.
fontSize = 22;
movieFullFileName = fullfile('cam4v.mp4');
% Check to see that it exists.
if ~exist(movieFullFileName, 'file')
strErrorMessage = sprintf('File not found:\n%s\nYou can choose a new one, or cancel', movieFullFileName);
response = questdlg(strErrorMessage, 'File not found', 'OK - choose a new movie.', 'Cancel', 'OK - choose a new movie.');
if strcmpi(response, 'OK - choose a new movie.')
[baseFileName, folderName, FilterIndex] = uigetfile('*.avi');
if ~isequal(baseFileName, 0)
movieFullFileName = fullfile(folderName, baseFileName);
else
return;
end
else
return;
end
end
try
videoObject = VideoReader(movieFullFileName);
% Determine how many frames there are.
numberOfFrames = videoObject.NumberOfFrames;
vidHeight = videoObject.Height;
vidWidth = videoObject.Width;
numberOfFramesWritten = 0;
% Prepare a figure to show the images in the upper half of the screen.
figure;
% screenSize = get(0, 'ScreenSize');
% Enlarge figure to full screen.
set(gcf, 'units','normalized','outerposition',[0 0 1 1]);
% Ask user if they want to write the individual frames out to disk.
promptMessage = sprintf('Do you want to save the individual frames out to individual disk files?');
button = questdlg(promptMessage, 'Save individual frames?', 'Yes', 'No', 'Yes');
if strcmp(button, 'Yes')
writeToDisk = true;
% Extract out the various parts of the filename.
[folder, baseFileName, extentions] = fileparts(movieFullFileName);
% Make up a special new output subfolder for all the separate
% movie frames that we're going to extract and save to disk.
% (Don't worry - windows can handle forward slashes in the folder name.)
folder = pwd; % Make it a subfolder of the folder where this m-file lives.
outputFolder = sprintf('%s/Movie Frames from %s', folder, baseFileName);
% Create the folder if it doesn't exist already.
if ~exist(outputFolder, 'dir')
mkdir(outputFolder);
end
else
writeToDisk = false;
end
% Loop through the movie, writing all frames out.
% Each frame will be in a separate file with unique name.
for frame = 1 : numberOfFrames
% Extract the frame from the movie structure.
thisFrame = read(videoObject, frame);
% Display it
hImage = subplot(2, 2, 1);
image(thisFrame);
caption = sprintf('Frame %4d of %d.', frame, numberOfFrames);
title(caption, 'FontSize', fontSize);
drawnow; % Force it to refresh the window.
% Write the image array to the output file, if requested.
if writeToDisk
% Construct an output image file name.
outputBaseFileName = sprintf('Frame %4.4d.png', frame);
outputFullFileName = fullfile(outputFolder, outputBaseFileName);
% Stamp the name and frame number onto the image.
% At this point it's just going into the overlay, not actually
% getting written into the pixel values.
text(5, 15, outputBaseFileName, 'FontSize', 20);
% Extract the image with the text "burned into" it.
frameWithText = getframe(gca);
% frameWithText.cdata is the image with the text
% actually written into the pixel values.
% Write it out to disk.
imwrite(frameWithText.cdata, outputFullFileName, 'png');
end
if frame == 1
xlabel('Frame Number');
ylabel('Gray Level');
% Get size data later for preallocation if we read
% the movie back in from disk.
[rows, columns, numberOfColorChannels] = size(thisFrame);
end
% Update user with the progress. Display in the command window.
if writeToDisk
progressIndication = sprintf('Wrote frame %4d of %d.', frame, numberOfFrames);
else
progressIndication = sprintf('Processed frame %4d of %d.', frame, numberOfFrames);
end
disp(progressIndication);
% Increment frame count (should eventually = numberOfFrames
% unless an error happens).
numberOfFramesWritten = numberOfFramesWritten + 1;
% Now let's do the differencing
alpha = 0.5;
if frame == 1
Background = thisFrame;
else
% Change background slightly at each frame
% Background(t+1)=(1-alpha)*I+alpha*Background
Background = (1-alpha)* thisFrame + alpha * Background;
end
% Display the changing/adapting background.
subplot(2, 2, 3);
imshow(Background);
title('Adaptive Background', 'FontSize', fontSize);
% Do color conversion from rgb to hsv
x=rgb2hsv(thisFrame);
y=rgb2hsv(Background);
% Split the hsv component to h,s,v value
Hx = x(:,:,1); Hy = y(:,:,1);
Sx = x(:,:,2); Sy = y(:,:,2);
Vx = x(:,:,3); Vy = y(:,:,3);
% Find the absolute difference between the h, s and v values of the current frame and the background
dh=(abs(double(Hx) - double(Hy)));
ds1=(abs(double(Sx) - double(Sy)));
dv1=(abs(double(Vx) - double(Vy)));
%Perform the swt2 transformation on difference of s and v value
[as,hs,vs,ds] = swt2(ds1,1,'haar');
[av,hv,vv,dv] = swt2(dv1,1,'haar');
%Compute the skewness value of 'swt of v'
sav=skewness(av(:));
shv=skewness(hv(:));
svv=skewness(vv(:));
sdv=skewness(dv(:));
%Compute the skewness value of 'swt of s'
sas=skewness(as(:));
shs=skewness(hs(:));
svs=skewness(vs(:));
sds=skewness(ds(:));
% Do the shadow detection based on the output of swt and the skewness of the 'v' value.
% Compare the swt 'v' output with its skewness value: if av >= sav then the pixel
% is assigned one, else it becomes zero. This operation continues through variable i.
b=(av>=sav); f= (as>=sas);
c=(hv>=shv); g=(hs>=shs);
d=(vv>=svv); h=(vs>=svs);
e=(dv>=sdv); i=(ds>=sds);
% Remove the shadows based on an AND operation
j=(b&f); l=(d&h);
k=(c&g); m=(e&i);
% Do inverse swt operation
recv = iswt2(b,c,d,e,'haar');
recs= iswt2(j,k,l,m,'haar');
%Combine the value of h,s and v
de_shadow=cat(3,dh,recs,recv);
rgb=hsv2rgb(de_shadow);
% Plot the image.
subplot(2, 2, 4);
imshow(rgb);
title('output Image', 'FontSize', fontSize);
end
% Alert user that we're done.
if writeToDisk
finishedMessage = sprintf('Done! It wrote %d frames to folder\n"%s"', numberOfFramesWritten, outputFolder);
else
finishedMessage = sprintf('Done! It processed %d frames of\n"%s"', numberOfFramesWritten, movieFullFileName);
end
disp(finishedMessage); % Write to command window.
uiwait(msgbox(finishedMessage)); % Also pop up a message box.
% Exit if they didn't write any individual frames out to disk.
if ~writeToDisk
return;
end
% Ask user if they want to read the individual frames from the disk,
% that they just wrote out, back into a movie and display it.
promptMessage = sprintf('Do you want to recall the individual frames\nback from disk into a movie?\n(This will take several seconds.)');
button = questdlg(promptMessage, 'Recall Movie?', 'Yes', 'No', 'Yes');
if strcmp(button, 'No')
return;
end
% Create a VideoWriter object to write the video out to a new, different file.
writerObj = VideoWriter('Newcam4v.mp4');
open(writerObj);
% Read the frames back in from disk, and convert them to a movie.
% Preallocate recalledMovie, which will be an array of structures.
% First get a cell array with all the frames.
allTheFrames = cell(numberOfFrames,1);
allTheFrames(:) = {zeros(vidHeight, vidWidth, 3, 'uint8')};
% Next get a cell array with all the colormaps.
allTheColorMaps = cell(numberOfFrames,1);
allTheColorMaps(:) = {zeros(256, 3)};
% Now combine these to make the array of structures.
recalledMovie = struct('cdata', allTheFrames, 'colormap', allTheColorMaps);
for frame = 1 : numberOfFrames
% Construct an output image file name.
outputBaseFileName = sprintf('Frame %4.4d.png', frame);
outputFullFileName = fullfile(outputFolder, outputBaseFileName);
% Read the image in from disk.
thisFrame = imread(outputFullFileName);
% Convert the image into a "movie frame" structure.
thisFrame=imresize(thisFrame,[452, 231]);
recalledMovie(frame) = im2frame(thisFrame);
% Write this frame out to a new video file.
writeVideo(writerObj, thisFrame);
end
close(writerObj);
% Get rid of old image and plot.
delete(hImage);
% Create new axes for our movie.
subplot(1, 3, 2);
axis off; % Turn off axes numbers.
title('Movie recalled from disk', 'FontSize', fontSize);
% Play the movie in the axes.
movie(recalledMovie);
% Note: if you want to display graphics or text in the overlay
% as the movie plays back then you need to do it like I did at first
% (at the top of this file where you extract and imshow a frame at a time.)
msgbox('Done this experiment!');
catch ME
% Some error happened if you get here.
strErrorMessage = sprintf('Error extracting movie frames from:\n\n%s\n\nError: %s\n\n', movieFullFileName, ME.message);
uiwait(msgbox(strErrorMessage));
end
The variable bc is getting overwritten, and I am unable to plot all of its values from the start.
I tried exporting the variable to a CSV file, but that didn't work.
I'm trying to detect red, green and blue objects and plot their coordinates versus time in MATLAB.
a = imaqhwinfo;
[camera_name, camera_id, format] = getCameraInfo(a);
% Capture the video frames using the videoinput function
% You have to replace the resolution & your installed adaptor name.
vid = videoinput(camera_name, camera_id, format);
% Set the properties of the video object
set(vid, 'FramesPerTrigger', Inf);
set(vid, 'ReturnedColorspace', 'rgb')
vid.FrameGrabInterval = 5;
% Start the video acquisition here.
start(vid)
% Set a loop that stops after 100 frames of acquisition.
while(vid.FramesAcquired<=100)
% Get the snapshot of the current frame
data = getsnapshot(vid);
% Now to track red objects in real time
% we subtract the grayscale image from the red channel
% to extract the red components in the image.
diff_im = imsubtract(data(:,:,1), rgb2gray(data));
%Use a median filter to filter out noise
diff_im = medfilt2(diff_im, [3 3]);
% Convert the resulting grayscale image into a binary image.
diff_im = im2bw(diff_im,0.18);
% Remove all those pixels less than 300px
diff_im = bwareaopen(diff_im,300);
% Label all the connected components in the image.
bw = bwlabel(diff_im, 8);
% Here we do the image blob analysis.
% We get a set of properties for each labeled region.
stats = regionprops(bw, 'BoundingBox', 'Centroid');
% Display the image
imshow(data)
hold on
%
% %This is a loop to bound the red objects in a rectangular box.
for object = 1:length(stats)
bb = stats(object).BoundingBox;
bc = stats(object).Centroid;
rectangle('Position',bb,'EdgeColor','r','LineWidth',2)
plot(bc(1),bc(2), '-m+')
x = bc(1);
y = bc(2);
csvwrite('bcx.dat', bc(1));
csvwrite('bcy.dat', bc(2));
a=text(bc(1)+15,bc(2), strcat('X: ', num2str(round(bc(1))), ' Y: ', num2str(round(bc(2)))));
set(a, 'FontName', 'Arial', 'FontWeight', 'bold', 'FontSize', 12, 'Color', 'yellow');
end
hold off
end
% Both the loops end here.
% Stop the video acquisition.
stop(vid);
% Flush all the image data stored in the memory buffer.
flushdata(vid);
% Clear all variables
clear all
%sprintf('%s','That was all about Image tracking, Guess that was pretty easy :) ')
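Note that csvwrite replaces the output file on every call, which is why only the last centroid survives. A minimal fix, sketched below under that assumption, is to accumulate the centroids in an array inside the loop and write the file once afterwards (allCentroids is a hypothetical name):
allCentroids = [];                       % accumulate [x y] rows across frames
while(vid.FramesAcquired<=100)
% ... detection code as above ...
for object = 1:length(stats)
bc = stats(object).Centroid;
allCentroids(end+1, :) = bc;             % append instead of overwriting
end
end
csvwrite('centroids.csv', allCentroids); % write once, after the loop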
I'm using the tutorial Motion-Based Multiple Object Tracking and have been able to get it to work with my video, but is it possible to save the video that I am getting as output? I tried saving it with the code given below, but it is not solving my issue. Kindly help.
function multiObjectTracking()
% Create System objects used for reading video, detecting moving objects,
% and displaying the results.
obj = setupSystemObjects();
tracks = initializeTracks(); % Create an empty array of tracks.
nextId = 1; % ID of the next track
% Detect moving objects, and track them across video frames.
while ~isDone(obj.reader)
frame = readFrame();
[centroids, bboxes, mask] = detectObjects(frame);
predictNewLocationsOfTracks();
[assignments, unassignedTracks, unassignedDetections] = ...
detectionToTrackAssignment();
updateAssignedTracks();
updateUnassignedTracks();
deleteLostTracks();
createNewTracks();
displayTrackingResults();
end
function obj = setupSystemObjects()
% Initialize Video I/O
% Create objects for reading a video from a file, drawing the tracked
% objects in each frame, and playing the video.
% Create a video file reader.
obj.reader = vision.VideoFileReader('./F1.mov');
% Create two video players, one to display the video,
% and one to display the foreground mask.
obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]);
obj.maskPlayer = vision.VideoPlayer('Position', [740, 400, 700, 400]);
% Create System objects for foreground detection and blob analysis
% The foreground detector is used to segment moving objects from
% the background. It outputs a binary mask, where the pixel value
% of 1 corresponds to the foreground and the value of 0 corresponds
% to the background.
obj.detector = vision.ForegroundDetector('NumGaussians', 3, ...
'NumTrainingFrames', 40, 'MinimumBackgroundRatio', 0.7);
% Connected groups of foreground pixels are likely to correspond to moving
% objects. The blob analysis System object is used to find such groups
% (called 'blobs' or 'connected components'), and compute their
% characteristics, such as area, centroid, and the bounding box.
obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, ...
'AreaOutputPort', true, 'CentroidOutputPort', true, ...
'MinimumBlobArea', 400);
end
function tracks = initializeTracks()
% create an empty array of tracks
tracks = struct(...
'id', {}, ...
'bbox', {}, ...
'kalmanFilter', {}, ...
'age', {}, ...
'totalVisibleCount', {}, ...
'consecutiveInvisibleCount', {});
end
function frame = readFrame()
frame = obj.reader.step();
end
function [centroids, bboxes, mask] = detectObjects(frame)
% Detect foreground.
mask = obj.detector.step(frame);
% Apply morphological operations to remove noise and fill in holes.
mask = imopen(mask, strel('rectangle', [3,3]));
mask = imclose(mask, strel('rectangle', [15, 15]));
mask = imfill(mask, 'holes');
% Perform blob analysis to find connected components.
[~, centroids, bboxes] = obj.blobAnalyser.step(mask);
end
function predictNewLocationsOfTracks()
for i = 1:length(tracks)
bbox = tracks(i).bbox;
% Predict the current location of the track.
predictedCentroid = predict(tracks(i).kalmanFilter);
% Shift the bounding box so that its center is at
% the predicted location.
predictedCentroid = int32(predictedCentroid) - bbox(3:4) / 2;
tracks(i).bbox = [predictedCentroid, bbox(3:4)];
end
end
function [assignments, unassignedTracks, unassignedDetections] = ...
detectionToTrackAssignment()
nTracks = length(tracks);
nDetections = size(centroids, 1);
% Compute the cost of assigning each detection to each track.
cost = zeros(nTracks, nDetections);
for i = 1:nTracks
cost(i, :) = distance(tracks(i).kalmanFilter, centroids);
end
% Solve the assignment problem.
costOfNonAssignment = 20;
[assignments, unassignedTracks, unassignedDetections] = ...
assignDetectionsToTracks(cost, costOfNonAssignment);
end
function updateAssignedTracks()
numAssignedTracks = size(assignments, 1);
for i = 1:numAssignedTracks
trackIdx = assignments(i, 1);
detectionIdx = assignments(i, 2);
centroid = centroids(detectionIdx, :);
bbox = bboxes(detectionIdx, :);
% Correct the estimate of the object's location
% using the new detection.
correct(tracks(trackIdx).kalmanFilter, centroid);
% Replace predicted bounding box with detected
% bounding box.
tracks(trackIdx).bbox = bbox;
% Update track's age.
tracks(trackIdx).age = tracks(trackIdx).age + 1;
% Update visibility.
tracks(trackIdx).totalVisibleCount = ...
tracks(trackIdx).totalVisibleCount + 1;
tracks(trackIdx).consecutiveInvisibleCount = 0;
end
end
function updateUnassignedTracks()
for i = 1:length(unassignedTracks)
ind = unassignedTracks(i);
tracks(ind).age = tracks(ind).age + 1;
tracks(ind).consecutiveInvisibleCount = ...
tracks(ind).consecutiveInvisibleCount + 1;
end
end
function deleteLostTracks()
if isempty(tracks)
return;
end
invisibleForTooLong = 20;
ageThreshold = 8;
% Compute the fraction of the track's age for which it was visible.
ages = [tracks(:).age];
totalVisibleCounts = [tracks(:).totalVisibleCount];
visibility = totalVisibleCounts ./ ages;
% Find the indices of 'lost' tracks.
lostInds = (ages < ageThreshold & visibility < 0.6) | ...
[tracks(:).consecutiveInvisibleCount] >= invisibleForTooLong;
% Delete lost tracks.
tracks = tracks(~lostInds);
end
function createNewTracks()
centroids = centroids(unassignedDetections, :);
bboxes = bboxes(unassignedDetections, :);
for i = 1:size(centroids, 1)
centroid = centroids(i,:);
bbox = bboxes(i, :);
% Create a Kalman filter object.
kalmanFilter = configureKalmanFilter('ConstantVelocity', ...
centroid, [200, 50], [100, 25], 100);
% Create a new track.
newTrack = struct(...
'id', nextId, ...
'bbox', bbox, ...
'kalmanFilter', kalmanFilter, ...
'age', 1, ...
'totalVisibleCount', 1, ...
'consecutiveInvisibleCount', 0);
% Add it to the array of tracks.
tracks(end + 1) = newTrack;
% Increment the next id.
nextId = nextId + 1;
end
end
function displayTrackingResults()
% Convert the frame and the mask to uint8 RGB.
frame = im2uint8(frame);
mask = uint8(repmat(mask, [1, 1, 3])) .* 255;
minVisibleCount = 8;
if ~isempty(tracks)
% Noisy detections tend to result in short-lived tracks.
% Only display tracks that have been visible for more than
% a minimum number of frames.
reliableTrackInds = ...
[tracks(:).totalVisibleCount] > minVisibleCount;
reliableTracks = tracks(reliableTrackInds);
% Display the objects. If an object has not been detected
% in this frame, display its predicted bounding box.
if ~isempty(reliableTracks)
% Get bounding boxes.
bboxes = cat(1, reliableTracks.bbox);
% Get ids.
ids = int32([reliableTracks(:).id]);
% Create labels for objects indicating the ones for
% which we display the predicted rather than the actual
% location.
labels = cellstr(int2str(ids'));
predictedTrackInds = ...
[reliableTracks(:).consecutiveInvisibleCount] > 0;
isPredicted = cell(size(labels));
isPredicted(predictedTrackInds) = {' predicted'};
labels = strcat(labels, isPredicted);
% Draw the objects on the frame.
frame = insertObjectAnnotation(frame, 'rectangle', ...
bboxes, labels);
% Draw the objects on the mask.
mask = insertObjectAnnotation(mask, 'rectangle', ...
bboxes, labels);
end
end
myVideo = VideoWriter('myfile.avi');
myVideo.FrameRate = 15; % Default 30
myVideo.Quality = 75; % Default 75
open(myVideo);
for i = 1:length(tracks)
writeVideo(myVideo, mask);
obj.maskPlayer.step(mask);
% obj.videoPlayer.step(frame);
% pause(1);
end
close(myVideo);
% Display the mask and the frame.
obj.maskPlayer.step(mask);
obj.videoPlayer.step(frame);
pause(1);
%saveas(fm1,'g.jpg')
end
end
There is indeed too much code here. You did not have to paste the entire program.
One problem that I can see is that you have a loop over the number of tracks, and you write a frame to the video file for each track, which is wrong. You should write a frame to the video file only once per iteration of the main loop.
If all you want is to create a video with the bounding boxes around tracked objects, you should write each frame with the annotations to the video file right after the call to insertObjectAnnotation.
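In other words: open the VideoWriter once (for example in setupSystemObjects), write one annotated frame per iteration, and close it after the main loop ends. A minimal sketch of that structure, assuming a field named obj.writer:
% In setupSystemObjects(): create and open the writer once.
obj.writer = VideoWriter('myfile.avi');
obj.writer.FrameRate = 15;
open(obj.writer);
% In displayTrackingResults(), right after insertObjectAnnotation:
writeVideo(obj.writer, frame);   % one annotated frame per main-loop iteration
% After the while-loop in multiObjectTracking():
close(obj.writer);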
I'm trying to develop an object tracking script that finds each object's centroid at every time point so that I can then calculate their velocities based on the time between frames. I'm using the tutorial Motion-Based Multiple Object Tracking and have been able to get it to work with my video, but now I'm trying to figure out how to extract the centroid data of each object and subsequently calculate the velocity. Please let me know if you have any recommendations!
Best, Ben
function multiObjectTracking()
% Create System objects used for reading video, detecting moving objects,
% and displaying the results.
obj = setupSystemObjects();
tracks = initializeTracks(); % Create an empty array of tracks.
nextId = 1; % ID of the next track
% Detect moving objects, and track them across video frames.
while ~isDone(obj.reader)
frame = readFrame();
[centroids, bboxes, mask] = detectObjects(frame);
predictNewLocationsOfTracks();
[assignments, unassignedTracks, unassignedDetections] = ...
detectionToTrackAssignment();
updateAssignedTracks();
updateUnassignedTracks();
deleteLostTracks();
createNewTracks();
displayTrackingResults();
end
function obj = setupSystemObjects()
% Initialize Video I/O
% Create objects for reading a video from a file, drawing the tracked
% objects in each frame, and playing the video.
% Create a video file reader.
obj.reader = vision.VideoFileReader('Beads.wmv');
% Create two video players, one to display the video,
% and one to display the foreground mask.
obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]);
obj.maskPlayer = vision.VideoPlayer('Position', [740, 400, 700, 400]);
% Create System objects for foreground detection and blob analysis
% The foreground detector is used to segment moving objects from
% the background. It outputs a binary mask, where the pixel value
% of 1 corresponds to the foreground and the value of 0 corresponds
% to the background.
obj.detector = vision.ForegroundDetector('NumGaussians', 3, ...
'NumTrainingFrames', 40, 'MinimumBackgroundRatio', 0.7);
% Connected groups of foreground pixels are likely to correspond to moving
% objects. The blob analysis System object is used to find such groups
% (called 'blobs' or 'connected components'), and compute their
% characteristics, such as area, centroid, and the bounding box.
obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, ...
'AreaOutputPort', true, 'CentroidOutputPort', true, ...
'MinimumBlobArea', 400);
end
function tracks = initializeTracks()
% create an empty array of tracks
tracks = struct(...
'id', {}, ...
'bbox', {}, ...
'kalmanFilter', {}, ...
'age', {}, ...
'totalVisibleCount', {}, ...
'consecutiveInvisibleCount', {});
end
function frame = readFrame()
frame = obj.reader.step();
end
function [centroids, bboxes, mask] = detectObjects(frame)
% Detect foreground.
mask = obj.detector.step(frame);
% Apply morphological operations to remove noise and fill in holes.
mask = imopen(mask, strel('rectangle', [3,3]));
mask = imclose(mask, strel('rectangle', [15, 15]));
mask = imfill(mask, 'holes');
% Perform blob analysis to find connected components.
[~, centroids, bboxes] = obj.blobAnalyser.step(mask);
end
function predictNewLocationsOfTracks()
for i = 1:length(tracks)
bbox = tracks(i).bbox;
% Predict the current location of the track.
predictedCentroid = predict(tracks(i).kalmanFilter);
% Shift the bounding box so that its center is at
% the predicted location.
predictedCentroid = int32(predictedCentroid) - bbox(3:4) / 2;
tracks(i).bbox = [predictedCentroid, bbox(3:4)];
end
end
function [assignments, unassignedTracks, unassignedDetections] = ...
detectionToTrackAssignment()
nTracks = length(tracks);
nDetections = size(centroids, 1);
% Compute the cost of assigning each detection to each track.
cost = zeros(nTracks, nDetections);
for i = 1:nTracks
cost(i, :) = distance(tracks(i).kalmanFilter, centroids);
end
% Solve the assignment problem.
costOfNonAssignment = 20;
[assignments, unassignedTracks, unassignedDetections] = ...
assignDetectionsToTracks(cost, costOfNonAssignment);
end
function updateAssignedTracks()
numAssignedTracks = size(assignments, 1);
for i = 1:numAssignedTracks
trackIdx = assignments(i, 1);
detectionIdx = assignments(i, 2);
centroid = centroids(detectionIdx, :);
bbox = bboxes(detectionIdx, :);
% Correct the estimate of the object's location
% using the new detection.
correct(tracks(trackIdx).kalmanFilter, centroid);
% Replace predicted bounding box with detected
% bounding box.
tracks(trackIdx).bbox = bbox;
% Update track's age.
tracks(trackIdx).age = tracks(trackIdx).age + 1;
% Update visibility.
tracks(trackIdx).totalVisibleCount = ...
tracks(trackIdx).totalVisibleCount + 1;
tracks(trackIdx).consecutiveInvisibleCount = 0;
end
end
function updateUnassignedTracks()
for i = 1:length(unassignedTracks)
ind = unassignedTracks(i);
tracks(ind).age = tracks(ind).age + 1;
tracks(ind).consecutiveInvisibleCount = ...
tracks(ind).consecutiveInvisibleCount + 1;
end
end
function deleteLostTracks()
if isempty(tracks)
return;
end
invisibleForTooLong = 20;
ageThreshold = 8;
% Compute the fraction of the track's age for which it was visible.
ages = [tracks(:).age];
totalVisibleCounts = [tracks(:).totalVisibleCount];
visibility = totalVisibleCounts ./ ages;
% Find the indices of 'lost' tracks.
lostInds = (ages < ageThreshold & visibility < 0.6) | ...
[tracks(:).consecutiveInvisibleCount] >= invisibleForTooLong;
% Delete lost tracks.
tracks = tracks(~lostInds);
end
function createNewTracks()
centroids = centroids(unassignedDetections, :);
bboxes = bboxes(unassignedDetections, :);
for i = 1:size(centroids, 1)
centroid = centroids(i,:);
bbox = bboxes(i, :);
% Create a Kalman filter object.
kalmanFilter = configureKalmanFilter('ConstantVelocity', ...
centroid, [200, 50], [100, 25], 100);
% Create a new track.
newTrack = struct(...
'id', nextId, ...
'bbox', bbox, ...
'kalmanFilter', kalmanFilter, ...
'age', 1, ...
'totalVisibleCount', 1, ...
'consecutiveInvisibleCount', 0);
% Add it to the array of tracks.
tracks(end + 1) = newTrack;
% Increment the next id.
nextId = nextId + 1;
end
end
function displayTrackingResults()
% Convert the frame and the mask to uint8 RGB.
frame = im2uint8(frame);
mask = uint8(repmat(mask, [1, 1, 3])) .* 255;
minVisibleCount = 8;
if ~isempty(tracks)
% Noisy detections tend to result in short-lived tracks.
% Only display tracks that have been visible for more than
% a minimum number of frames.
reliableTrackInds = ...
[tracks(:).totalVisibleCount] > minVisibleCount;
reliableTracks = tracks(reliableTrackInds);
% Display the objects. If an object has not been detected
% in this frame, display its predicted bounding box.
if ~isempty(reliableTracks)
% Get bounding boxes.
bboxes = cat(1, reliableTracks.bbox);
% Get ids.
ids = int32([reliableTracks(:).id]);
% Create labels for objects indicating the ones for
% which we display the predicted rather than the actual
% location.
labels = cellstr(int2str(ids'));
predictedTrackInds = ...
[reliableTracks(:).consecutiveInvisibleCount] > 0;
isPredicted = cell(size(labels));
isPredicted(predictedTrackInds) = {' predicted'};
labels = strcat(labels, isPredicted);
% Draw the objects on the frame.
frame = insertObjectAnnotation(frame, 'rectangle', ...
bboxes, labels);
% Draw the objects on the mask.
mask = insertObjectAnnotation(mask, 'rectangle', ...
bboxes, labels);
end
end
% Display the mask and the frame.
obj.maskPlayer.step(mask);
obj.videoPlayer.step(frame);
end
end
Since you have the centroid of the object in every frame, you can compute its velocity in pixels/frame by subtracting consecutive centroids.
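A minimal sketch of that idea, assuming you add a hypothetical centroidHistory field to each track (initialized in createNewTracks and appended in updateAssignedTracks):
% In createNewTracks(), add to the struct: 'centroidHistory', centroid, ...
% In updateAssignedTracks(), append each new detection:
% tracks(trackIdx).centroidHistory(end+1, :) = centroid;

% Velocity in pixels/frame for track k, by differencing consecutive centroids:
history = tracks(k).centroidHistory;   % N-by-2 array of [x y] positions
velocity = diff(history, 1, 1);        % per-frame displacement vectors
speed = sqrt(sum(velocity.^2, 2));     % speed magnitude in pixels/frame
% Multiply by the video frame rate to convert to pixels/second, e.g.:
% readerInfo = info(obj.reader);       % readerInfo.VideoFrameRate
% speedPerSec = speed * readerInfo.VideoFrameRate;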