Test / match images in an SVM classifier - MATLAB

I have extracted HOG features from a set of 20 images and trained a classifier with the following code, but I have issues testing the classifier. What am I doing wrong? Please help.
%% Load Images
imgFolder1 = fullfile('C:\Users\Engineering\Desktop\Finn\NEW');
imgFolder2 = fullfile('C:\Users\Engineering\Desktop\Finn\NEW2');
training = imageSet(imgFolder1);
test = imageSet(imgFolder2);
%% Extract and display Histogram of Oriented Gradient Features for single Note
[hogFeature, visualization]= ...
extractHOGFeatures(read(training,1));
figure;
subplot(2,1,1);imshow(read(training,1));title('Input Face');
subplot(2,1,2);plot(visualization);title('HoG Feature');
%% Extract HOG Features for training set
trainingFeatures = [];
trainingLabel = [];
for i = 1:training.Count
    [hogFeature, visualization] = extractHOGFeatures(read(training,i));
    trainingFeatures = [trainingFeatures; hogFeature];
end
%%
labels = repmat(training.Description, i);
trainingLabel = [trainingLabel; labels];
%% I am not sure about this line for noteIndex:
noteIndex = trainingLabel, i
%% Create 20 class classifier using fitcecoc
noteClassifier = fitcecoc(trainingFeatures,trainingLabel);
%% Test Images from Test Set
note = 1;
queryImage = read(test(note),1);
queryFeatures = extractHOGFeatures(queryImage);
noteLabel = predict(noteClassifier,queryFeatures);
% Map back to training set to find identity
booleanIndex = strcmp(noteLabel, noteIndex);
integerIndex = find(booleanIndex);
subplot(1,2,1);imshow(queryImage);title('Query Face');
subplot(1,2,2);imshow(read(training(integerIndex),1));title('Matched Class');
%% Test First 5 notes from Test Set
figure;
figureNum = 1;
for note = 1
    for k = 1:test(note).Count
        queryImage = read(test(note),k);
        queryFeatures = extractHOGFeatures(queryImage);
        noteLabel = predict(noteClassifier,queryFeatures);
        % Map back to training set to find identity
        booleanIndex = strcmp(noteLabel, noteIndex);
        integerIndex = find(booleanIndex);
        subplot(4,4,figureNum);imshow(imresize(queryImage,3));title('Query Note');
        subplot(4,4,figureNum+1);imshow(imresize(read(training(integerIndex),1),3));title('Matched Class');
        figureNum = figureNum+2;
    end
    figure;
    figureNum = 1;
end
I get these errors:
"This method is not supported for arrays of imageSet objects."
and an error at the line:
subplot(1,2,2);imshow(read(training(integerIndex),1));title('Matched Class');
So it displays the query image, but the matched image comes out as an empty graph/plot with no image.
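For what it's worth, here is a minimal, untested sketch of how the label vector and the test-time lookup could be built, assuming imgFolder1 and imgFolder2 each contain one subfolder per class (so imageSet(...,'recursive') returns an array of sets, one per class) and all images are the same size so the HOG feature length is constant:
training = imageSet(imgFolder1, 'recursive');
test = imageSet(imgFolder2, 'recursive');
trainingFeatures = [];
trainingLabel = {};
for classIdx = 1:numel(training)
    for imgIdx = 1:training(classIdx).Count
        hogFeature = extractHOGFeatures(read(training(classIdx), imgIdx));
        trainingFeatures = [trainingFeatures; hogFeature];
        trainingLabel = [trainingLabel; {training(classIdx).Description}];
    end
end
noteIndex = {training.Description};                 % one label per class, used for the lookup
noteClassifier = fitcecoc(trainingFeatures, trainingLabel);
% Test a single image from the first class of the test set
queryImage = read(test(1), 1);
queryFeatures = extractHOGFeatures(queryImage);
noteLabel = predict(noteClassifier, queryFeatures);
integerIndex = find(strcmp(noteLabel, noteIndex));  % map the predicted label back to a training class
subplot(1,2,1); imshow(queryImage); title('Query Note');
subplot(1,2,2); imshow(read(training(integerIndex), 1)); title('Matched Class');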

Related

How do I find a local threshold for coefficients in image compression using DWT in MATLAB

I'm trying to write an image compression script in MATLAB using a multilevel 3D DWT (color image). Along the way, I want to apply thresholding to the coefficient matrices, using both global and local thresholds.
I'd like to use the formula below (as implemented in my code further down) to calculate my local threshold: T = sigma * sqrt(2 * log2(N)),
where sigma is the variance and N is the number of elements.
Global thresholding works fine; but my problem is that the calculated local threshold is (most often!) greater than the maximum band coefficient, therefore no thresholding is applied.
Everything else works fine and I get a result too, but I suspect the local threshold is miscalculated. Also, the resulting image is larger than the original!
I'd appreciate any help on the correct way to calculate the local threshold, or if there's a pre-set MATLAB function.
here's an example output:
here's my code:
clear;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% COMPRESSION %%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% read base image
% dwt 3/5-L on base images
% quantize coeffs (local/global)
% count zero value-ed coeffs
% calculate mse/psnr
% save and show result
% read images
base = imread('circ.jpg');
fam = 'haar'; % wavelet family
lvl = 3; % wavelet depth
% set to 1 to apply global thr
thr_type = 0;
% global threshold value
gthr = 180;
% convert base to grayscale
%base = rgb2gray(base);
% apply dwt on base image
dc = wavedec3(base, lvl, fam);
% extract coeffs
ll_base = dc.dec{1};
lh_base = dc.dec{2};
hl_base = dc.dec{3};
hh_base = dc.dec{4};
ll_var = var(ll_base, 0);
lh_var = var(lh_base, 0);
hl_var = var(hl_base, 0);
hh_var = var(hh_base, 0);
% count number of elements
ll_n = numel(ll_base);
lh_n = numel(lh_base);
hl_n = numel(hl_base);
hh_n = numel(hh_base);
% find local threshold
ll_t = ll_var * (sqrt(2 * log2(ll_n)));
lh_t = lh_var * (sqrt(2 * log2(lh_n)));
hl_t = hl_var * (sqrt(2 * log2(hl_n)));
hh_t = hh_var * (sqrt(2 * log2(hh_n)));
% global
if thr_type == 1
ll_t = gthr; lh_t = gthr; hl_t = gthr; hh_t = gthr;
end
% count zero values in bands
ll_size = size(ll_base);
lh_size = size(lh_base);
hl_size = size(hl_base);
hh_size = size(hh_base);
% count zero values in new band matrices
ll_zeros = sum(ll_base==0,'all');
lh_zeros = sum(lh_base==0,'all');
hl_zeros = sum(hl_base==0,'all');
hh_zeros = sum(hh_base==0,'all');
% initiate new matrices
ll_new = zeros(ll_size);
lh_new = zeros(lh_size);
hl_new = zeros(lh_size);
hh_new = zeros(lh_size);
% apply thresholding on bands
% if new value < thr => 0
% otherwise, keep the previous value
for id = 1:ll_size(1)
    for idx = 1:ll_size(2)
        if ll_base(id,idx) < ll_t
            ll_new(id,idx) = 0;
        else
            ll_new(id,idx) = ll_base(id,idx);
        end
    end
end
for id = 1:lh_size(1)
    for idx = 1:lh_size(2)
        if lh_base(id,idx) < lh_t
            lh_new(id,idx) = 0;
        else
            lh_new(id,idx) = lh_base(id,idx);
        end
    end
end
for id = 1:hl_size(1)
    for idx = 1:hl_size(2)
        if hl_base(id,idx) < hl_t
            hl_new(id,idx) = 0;
        else
            hl_new(id,idx) = hl_base(id,idx);
        end
    end
end
for id = 1:hh_size(1)
    for idx = 1:hh_size(2)
        if hh_base(id,idx) < hh_t
            hh_new(id,idx) = 0;
        else
            hh_new(id,idx) = hh_base(id,idx);
        end
    end
end
% count zeros of the new matrices
ll_new_size = size(ll_new);
lh_new_size = size(lh_new);
hl_new_size = size(hl_new);
hh_new_size = size(hh_new);
% count number of zeros among new values
ll_new_zeros = sum(ll_new==0,'all');
lh_new_zeros = sum(lh_new==0,'all');
hl_new_zeros = sum(hl_new==0,'all');
hh_new_zeros = sum(hh_new==0,'all');
% set new band matrices
dc.dec{1} = ll_new;
dc.dec{2} = lh_new;
dc.dec{3} = hl_new;
dc.dec{4} = hh_new;
% count how many coeff. were thresholded
ll_zeros_diff = ll_new_zeros - ll_zeros;
lh_zeros_diff = lh_zeros - lh_new_zeros;
hl_zeros_diff = hl_zeros - hl_new_zeros;
hh_zeros_diff = hh_zeros - hh_new_zeros;
% show coeff. matrices vs. thresholded version
figure
colormap(gray);
subplot(2,4,1); imagesc(ll_base); title('LL');
subplot(2,4,2); imagesc(lh_base); title('LH');
subplot(2,4,3); imagesc(hl_base); title('HL');
subplot(2,4,4); imagesc(hh_base); title('HH');
subplot(2,4,5); imagesc(ll_new); title({'LL thr';ll_zeros_diff});
subplot(2,4,6); imagesc(lh_new); title({'LH thr';lh_zeros_diff});
subplot(2,4,7); imagesc(hl_new); title({'HL thr';hl_zeros_diff});
subplot(2,4,8); imagesc(hh_new); title({'HH thr';hh_zeros_diff});
% idwt to reconstruct compressed image
cmp = waverec3(dc);
cmp = uint8(cmp);
% calculate mse/psnr
D = abs(cmp - base) .^2;
mse = sum(D(:))/numel(base);
psnr = 10*log10(255*255/mse);
% show images and mse/psnr
figure
subplot(1,2,1);
imshow(base); title("Original"); axis square;
subplot(1,2,2);
imshow(cmp); colormap(gray); axis square;
msg = strcat("MSE: ", num2str(mse), " | PSNR: ", num2str(psnr));
title({"Compressed";msg});
% save image locally
imwrite(cmp, 'compressed.png');
I solved it myself.
The sigma in the local threshold formula is not the variance; it's the standard deviation. I applied these steps:
used stdfilt()/std2() to find the standard deviation of my coefficient matrices (thanks to @Rotem for pointing this out)
used numel() to count the number of elements in the coefficient matrices
This is a summary of the process; it's the same for the other bands (LH, HL, HH):
[c, s] = wavedec2(image, wname, level);   % apply dwt
ll = appcoef2(c, s, wname);               % extract the LL (approximation) band
ll_std = std2(ll);                        % standard deviation
ll_n = numel(ll);                         % number of coeffs in LL
ll_t = ll_std * (sqrt(2 * log2(ll_n)));   % local threshold formula
ll_new = ll .* double(ll > ll_t);         % thresholding
replace the LL values in c in a for loop
reconstruct by applying IDWT using waverec2
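Putting those steps together, a rough, untested end-to-end sketch (grayscale image, LL band only; the detail bands from detcoef2 would be handled the same way, and the write-back relies on the LL coefficients being stored at the start of c):
img = imread('circ.jpg');
if size(img,3) == 3, img = rgb2gray(img); end   % work on a grayscale image
[c, s] = wavedec2(img, 3, 'haar');              % 3-level 2-D DWT
ll = appcoef2(c, s, 'haar');                    % LL (approximation) band
ll_t = std2(ll) * sqrt(2 * log2(numel(ll)));    % local threshold
ll_new = ll .* double(ll > ll_t);               % hard thresholding
c(1:numel(ll)) = ll_new(:);                     % replace the LL values in c
cmp = uint8(waverec2(c, s, 'haar'));            % reconstruct with the inverse DWT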
this is a sample output:

Time Series Forecasting Using Deep Learning in MATLAB

I am using the time series forecasting sample from MathWorks in https://uk.mathworks.com/help/nnet/examples/time-series-forecasting-using-deep-learning.html
The output in the above-mentioned web-address is:
I only changed the dataset and ran the algorithm. Surprisingly, the algorithm does not work well with my dataset and generates a line as the forecast, as shown below:
I am really confused and cannot understand the reason. I might need to tune parameters in the algorithm that I am not aware of. The code I am using is:
%% Load Data
%data = chickenpox_dataset;
%data = [data{:}];
data = xlsread('data.xlsx');
data = data';
%% Divide Data: Training and Testing
numTimeStepsTrain = floor(0.7*numel(data));
XTrain = data(1:numTimeStepsTrain);
YTrain = data(2:numTimeStepsTrain+1);
XTest = data(numTimeStepsTrain+1:end-1);
YTest = data(numTimeStepsTrain+2:end);
%% Standardize Data
mu = mean(XTrain);
sig = std(XTrain);
XTrain = (XTrain - mu) / sig;
YTrain = (YTrain - mu) / sig;
XTest = (XTest - mu) / sig;
%% Define LSTM Network
inputSize = 1;
numResponses = 1;
numHiddenUnits = 500;
layers = [ ...
sequenceInputLayer(inputSize)
lstmLayer(numHiddenUnits)
fullyConnectedLayer(numResponses)
regressionLayer];
%% Training Options
opts = trainingOptions('adam', ...
'MaxEpochs',500, ...
'GradientThreshold',1, ...
'InitialLearnRate',0.005, ...
'LearnRateSchedule','piecewise', ...
'LearnRateDropPeriod',125, ...
'LearnRateDropFactor',0.2, ...
'Verbose',0, ...
'Plots','training-progress');
%% Train Network
net = trainNetwork(XTrain,YTrain,layers,opts);
%% Forecast Future Time Steps
net = predictAndUpdateState(net,XTrain);
[net,YPred] = predictAndUpdateState(net,YTrain(end));
numTimeStepsTest = numel(XTest);
for i = 2:numTimeStepsTest
[net,YPred(1,i)] = predictAndUpdateState(net,YPred(i-1));
end
%% Unstandardize the predictions using mu and sig calculated earlier.
YPred = sig*YPred + mu;
%% RMSE and MAE Calculation
rmse = sqrt(mean((YPred-YTest).^2))
MAE = mae(YPred-YTest)
%% Plot results
figure
plot(data(1:numTimeStepsTrain))
hold on
idx = numTimeStepsTrain:(numTimeStepsTrain+numTimeStepsTest);
plot(idx,[data(numTimeStepsTrain) YPred],'.-')
hold off
xlabel("Month")
ylabel("Cases")
title("Forecast")
legend(["Observed" "Forecast"])
%% Compare the forecasted values with the test data
figure
subplot(2,1,1)
plot(YTest)
hold on
plot(YPred,'.-')
hold off
legend(["Observed" "Forecast"])
ylabel("Cases")
title("Forecast")
subplot(2,1,2)
stem(YPred - YTest)
xlabel("Month")
ylabel("Error")
title("RMSE = " + rmse)
And the data.xlsx is in: https://www.dropbox.com/s/vv1apug7iqlocu1/data.xlsx?dl=1
You want to find temporal patterns in the data. The MathWorks example data looks like a sine wave with noise, a very clear pattern. Your data is far from showing a clear pattern, and it needs preprocessing. I would start by removing the slow drifts; a high-pass or band-pass filter of some sort makes sense. Here is a simple line just for a quick view of your data without the slow frequencies:
T=readtable('data.xlsx','readvariablenames',0);
figure; plot(T.Var1-smoothdata(T.Var1,'movmean',200))
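If that view looks more structured, the same detrending can be applied as a rough first step (untested, not a tuned solution) right after loading the data in the script above, before the train/test split and standardization:
data = xlsread('data.xlsx');
data = data';
% subtract a slow moving-average baseline (window 200, the same value as in the quick look above)
data = data - smoothdata(data, 'movmean', 200);
% ...then continue with the train/test split, standardization and LSTM training as before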

How does the choice of the wavelet function impact the speed of cwt()?

In cwt() I can specify which wavelet function to use. How does that impact the speed of cwt()?
Here is a benchmark, which I ran with the -singleCompThread option when starting MATLAB to force it to use a single computational thread. cwt() was passed a 1,000,000-sample signal and asked to compute scales 1 to 10. My CPU is an i7-3610QM.
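(As a side note, and not what was used for the timings below: the same single-thread restriction can also be set from within MATLAB for the duration of a session, which is convenient when re-running the benchmark interactively.)
lastN = maxNumCompThreads(1);   % force single-threaded computation, remember the previous setting
% ... run the benchmark ...
maxNumCompThreads(lastN);       % restore the previous thread count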
Code used:
clear all
%% Benchmark parameters
results_file_name = 'results_scale1-10.csv';
number_of_random_runs = 10;
scales = 1:10;
number_of_random_samples = 1000000;
%% Construct a cell array containing all the wavelet names
wavelet_haar_names = {'haar'};
wavelet_db_names = {'db1'; 'db2'; 'db3'; 'db4'; 'db5'; 'db6'; 'db7'; 'db8'; 'db9'; 'db10'};
wavelet_sym_names = {'sym2'; 'sym3'; 'sym4'; 'sym5'; 'sym6'; 'sym7'; 'sym8'};
wavelet_coif_names = {'coif1'; 'coif2'; 'coif3'; 'coif4'; 'coif5'};
wavelet_bior_names = {'bior1.1'; 'bior1.3'; 'bior1.5'; 'bior2.2'; 'bior2.4'; 'bior2.6'; 'bior2.8'; 'bior3.1'; 'bior3.3'; 'bior3.5'; 'bior3.7'; 'bior3.9'; 'bior4.4'; 'bior5.5'; 'bior6.8'};
wavelet_rbior_names = {'rbio1.1'; 'rbio1.3'; 'rbio1.5'; 'rbio2.2'; 'rbio2.4'; 'rbio2.6'; 'rbio2.8'; 'rbio3.1'; 'rbio3.3'; 'rbio3.5'; 'rbio3.7'; 'rbio3.9'; 'rbio4.4'; 'rbio5.5'; 'rbio6.8'};
wavelet_meyer_names = {'meyr'};
wavelet_dmeyer_names = {'dmey'};
wavelet_gaus_names = {'gaus1'; 'gaus2'; 'gaus3'; 'gaus4'; 'gaus5'; 'gaus6'; 'gaus7'; 'gaus8'};
wavelet_mexh_names = {'mexh'};
wavelet_morl_names = {'morl'};
wavelet_cgau_names = {'cgau1'; 'cgau2'; 'cgau3'; 'cgau4'; 'cgau5'};
wavelet_shan_names = {'shan1-1.5'; 'shan1-1'; 'shan1-0.5'; 'shan1-0.1'; 'shan2-3'};
wavelet_fbsp_names = {'fbsp1-1-1.5'; 'fbsp1-1-1'; 'fbsp1-1-0.5'; 'fbsp2-1-1'; 'fbsp2-1-0.5'; 'fbsp2-1-0.1'};
wavelet_cmor_names = {'cmor1-1.5'; 'cmor1-1'; 'cmor1-0.5'; 'cmor1-1'; 'cmor1-0.5'; 'cmor1-0.1'};
% Concatenate all wavelet names into a single cell array
wavelet_categories_names = who('wavelet*names');
wavelet_names = {};
for wavelet_categories_number=1:size(wavelet_categories_names,1)
temp = wavelet_categories_names(wavelet_categories_number);
temp = eval(temp{1});
wavelet_names = vertcat(wavelet_names, temp);
end
%% Prepare data
random_signal = rand(number_of_random_runs,number_of_random_samples);
%% Run benchmarks
result_file_ID = fopen(results_file_name, 'w');
for wavelet_number = 1:size(wavelet_names,1)
wavelet_name = wavelet_names(wavelet_number,:)
% Compute wavelet on a random signal
tic
for run = 1:number_of_random_runs
cwt(random_signal(run, :),scales,wavelet_name{1});
end
run_time_random_test = toc
fprintf(result_file_ID, '%s,', wavelet_name{1})
fprintf(result_file_ID, '%d\n', run_time_random_test)
end
size(wavelet_names,1)
fclose(result_file_ID);
If you want to see the impact of the choice of the scale:
Code used:
clear all
%% Benchmark parameters
results_file_name = 'results_sym2_change_scale.csv';
number_of_random_runs = 10;
scales = 1:10;
number_of_random_samples = 10000000;
% wavelet_names = {'sym2', 'sym3'}%, 'sym4'};
output_directory = 'output';
wavelet_names = get_all_wavelet_names();
%% Prepare data
random_signal = rand(number_of_random_runs,number_of_random_samples);
%% Prepare result folder
if ~exist(output_directory, 'dir')
mkdir(output_directory);
end
%% Run benchmarks
result_file_ID = fopen(results_file_name, 'w');
for wavelet_number = 1:size(wavelet_names,1)
wavelet_name = wavelet_names{wavelet_number}
if wavelet_number > 1
fprintf(result_file_ID, '%s\n', '');
end
fprintf(result_file_ID, '%s', wavelet_name)
run_time_random_test_scales = zeros(size(scales,2),1);
for scale_number = 1:size(scales,2)
scale = scales(scale_number);
% Compute wavelet on a random signal
tic
for run = 1:number_of_random_runs
cwt(random_signal(run, :),scale,wavelet_name);
end
run_time_random_test = toc
fprintf(result_file_ID, ',%d', run_time_random_test)
run_time_random_test_scales(scale_number) = run_time_random_test;
end
figure
bar(run_time_random_test_scales)
title(['Run time on random signal for ' wavelet_name])
xlabel('Scale')
ylabel('Run time (seconds)')
save_figure( fullfile(output_directory, ['run_time_random_test_' wavelet_name]) )
close all
end
size(wavelet_names,1)
fclose(result_file_ID);
With 3 functions:
get_all_wavelet_names.m:
function [ wavelet_names ] = get_all_wavelet_names( )
%GET_ALL_WAVELET_NAMES Get a list of available wavelet functions
%% Construct a cell array containing all the wavelet names
wavelet_haar_names = {'haar'};
wavelet_db_names = {'db1'; 'db2'; 'db3'; 'db4'; 'db5'; 'db6'; 'db7'; 'db8'; 'db9'; 'db10'};
wavelet_sym_names = {'sym2'; 'sym3'; 'sym4'; 'sym5'; 'sym6'; 'sym7'; 'sym8'};
wavelet_coif_names = {'coif1'; 'coif2'; 'coif3'; 'coif4'; 'coif5'};
wavelet_bior_names = {'bior1.1'; 'bior1.3'; 'bior1.5'; 'bior2.2'; 'bior2.4'; 'bior2.6'; 'bior2.8'; 'bior3.1'; 'bior3.3'; 'bior3.5'; 'bior3.7'; 'bior3.9'; 'bior4.4'; 'bior5.5'; 'bior6.8'};
wavelet_rbior_names = {'rbio1.1'; 'rbio1.3'; 'rbio1.5'; 'rbio2.2'; 'rbio2.4'; 'rbio2.6'; 'rbio2.8'; 'rbio3.1'; 'rbio3.3'; 'rbio3.5'; 'rbio3.7'; 'rbio3.9'; 'rbio4.4'; 'rbio5.5'; 'rbio6.8'};
wavelet_meyer_names = {'meyr'};
wavelet_dmeyer_names = {'dmey'};
wavelet_gaus_names = {'gaus1'; 'gaus2'; 'gaus3'; 'gaus4'; 'gaus5'; 'gaus6'; 'gaus7'; 'gaus8'};
wavelet_mexh_names = {'mexh'};
wavelet_morl_names = {'morl'};
wavelet_cgau_names = {'cgau1'; 'cgau2'; 'cgau3'; 'cgau4'; 'cgau5'};
wavelet_shan_names = {'shan1-1.5'; 'shan1-1'; 'shan1-0.5'; 'shan1-0.1'; 'shan2-3'};
wavelet_fbsp_names = {'fbsp1-1-1.5'; 'fbsp1-1-1'; 'fbsp1-1-0.5'; 'fbsp2-1-1'; 'fbsp2-1-0.5'; 'fbsp2-1-0.1'};
wavelet_cmor_names = {'cmor1-1.5'; 'cmor1-1'; 'cmor1-0.5'; 'cmor1-1'; 'cmor1-0.5'; 'cmor1-0.1'};
% Concatenate all wavelet names into a single cell array
wavelet_categories_names = who('wavelet*names');
wavelet_names = {};
for wavelet_categories_number=1:size(wavelet_categories_names,1)
temp = wavelet_categories_names(wavelet_categories_number);
temp = eval(temp{1});
wavelet_names = vertcat(wavelet_names, temp);
end
end
save_figure.m:
function [ ] = save_figure( output_graph_filename )
% Record a figure as PNG, PDF and FIG files
% Create the folder if it doesn't exist already.
[pathstr, name, ext] = fileparts(output_graph_filename);
if ~exist(pathstr, 'dir')
mkdir(pathstr);
end
h = gcf;
set(0,'defaultAxesFontSize',18) % http://www.mathworks.com/support/solutions/en/data/1-8XOW94/index.html?solution=1-8XOW94
boldify(h);
print('-dpng','-r600', [output_graph_filename '.png']);
print(h,[output_graph_filename '.pdf'],'-dpdf','-r600')
saveas(gcf,[output_graph_filename '.fig'], 'fig')
end
and boldify.m:
function boldify(h,g)
%BOLDIFY Make lines and text bold for standard viewgraph style.
% BOLDIFY boldifies the lines and text of the current figure.
% BOLDIFY(H) applies to the graphics handle H.
%
% BOLDIFY(X,Y) specifies an X by Y inch graph of the current
% figure. If text labels have their 'UserData' data property
% set to 'slope = ...', then the 'Rotation' property is set to
% account for changes in the graph's aspect ratio. The
% default is MATLAB's default.
% S. T. Smith
% The name of this function does not represent an endorsement by the author
% of the egregious grammatical trend of verbing nouns.
if nargin < 1, h = gcf;, end
% Set (and get) the default MATLAB paper size and position
set(gcf,'PaperPosition','default');
units = get(gcf,'PaperUnits');
set(gcf,'PaperUnits','inches');
fsize = get(gcf,'PaperPosition');
fsize = fsize(3:4); % Figure size (X" x Y") on paper.
psize = get(gcf,'PaperSize');
if nargin == 2 % User specified graph size
fsize = [h,g];
h = gcf;
end
% Set the paper position of the current figure
set(gcf,'PaperPosition', ...
[(psize(1)-fsize(1))/2 (psize(2)-fsize(2))/2 fsize(1) fsize(2)]);
fsize = get(gcf,'PaperPosition');
fsize = fsize(3:4); % Graph size (X" x Y") on paper.
set(gcf,'PaperUnits',units); % Back to original
% Get the normalized axis position of the current axes
units = get(gca,'Units');
set(gca,'Units','normalized');
asize = get(gca,'Position');
asize = asize(3:4);
set(gca,'Units',units);
ha = get(h,'Children');
for i=1:length(ha)
% if get(ha(i),'Type') == 'axes'
% changed by B. A. Miller
if strcmp(get(ha(i), 'Type'), 'axes') == 1
units = get(ha(i),'Units');
set(ha(i),'Units','normalized');
asize = get(ha(i),'Position'); % Axes Position (normalized)
asize = asize(3:4);
set(ha(i),'Units',units);
[m,j] = max(asize); j = j(1);
scale = 1/(asize(j)*fsize(j)); % scale*inches -normalized units
set(ha(i),'FontWeight','Bold');
set(ha(i),'LineWidth',2);
[m,k] = min(asize); k = k(1);
if asize(k)*fsize(k) > 1/2
set(ha(i),'TickLength',[1/8 1.5*1/8]*scale); % Gives 1/8" ticks
else
set(ha(i),'TickLength',[3/32 1.5*3/32]*scale); % Gives 3/32" ticks
end
set(get(ha(i),'XLabel'),'FontSize',18); % 14-pt labels
set(get(ha(i),'XLabel'),'FontWeight','Bold');
set(get(ha(i),'XLabel'),'VerticalAlignment','top');
set(get(ha(i),'YLabel'),'FontSize',18); % 14-pt labels
set(get(ha(i),'YLabel'),'FontWeight','Bold');
%set(get(ha(i),'YLabel'),'VerticalAlignment','baseline');
set(get(ha(i),'Title'),'FontSize',18); % 16-pt titles
set(get(ha(i),'Title'),'FontWeight','Bold');
% set(get(ha(i), 'FontSize',20, 'XTick',[]));
end
hc = get(ha(i),'Children');
for j=1:length(hc)
chtype = get(hc(j),'Type');
if chtype(1:4) == 'text'
set(hc(j),'FontSize',17); % 12 pt descriptive labels
set(hc(j),'FontWeight','Bold');
ud = get(hc(j),'UserData'); % User data
if length(ud) >= 8
if ud(1:8) == 'slope = ' % Account for change in actual slope
slope = sscanf(ud,'slope = %g');
slope = slope*(fsize(2)/fsize(1))/(asize(2)/asize(1));
set(hc(j),'Rotation',atan(slope)/pi*180);
end
end
elseif chtype(1:4) == 'line'
set(hc(j),'LineWidth',2);
end
end
end
Bonus: correlation between all wavelets on a random signal with 1000000 samples with the first 10 scales:
Code used:
%% PRE-REQUISITE: You need to download http://www.mathworks.com/matlabcentral/fileexchange/24253-customizable-heat-maps , which gives the function heatmap()
%% Benchmark parameters
scales = 1:10;
number_of_random_samples = 1000000;
% wavelet_names = {'sym2'; 'sym3'; 'sym4'; 'sym5'; 'sym6'; 'sym7'; 'sym8'};
% wavelet_names = {'cgau1'; 'cgau2'; 'cgau3'; 'cgau4'; 'cgau5'};
wavelet_names = {'db2'; 'sym2'};
OUTPUT_FOLDER = 'output_corr';
% wavelet_names = get_all_wavelet_names(); % WARNING: you need to remove all complex wavelets, viz. cgau, shan, fbsp and cmor, otherwise heatmap() will fail on the complex-valued results.
%% Prepare data
random_signal = rand(1,number_of_random_samples);
results = zeros(size(wavelet_names,1), number_of_random_samples);
%% Prepare result folder
if ~exist(OUTPUT_FOLDER, 'dir')
mkdir(OUTPUT_FOLDER);
end
%% Run benchmarks
for scale_number = 1:size(scales,2)
scale = scales(scale_number);
for wavelet_number = 1:size(wavelet_names,1)
wavelet_name = wavelet_names{wavelet_number}
% Compute wavelet on a random signal
run = 1;
results(wavelet_number, :) = cwt(random_signal(run, :),scale,wavelet_name);
if wavelet_number == 999
break
end
end
correlation_results = corrcoef(results')
heatmap(correlation_results, [], [], '%0.2f', 'MinColorValue', -1.0, 'MaxColorValue', 1.0, 'Colormap', 'jet',...
'Colorbar', true, 'ColorLevels', 64, 'UseFigureColormap', false);
title(['Correlation matrix for scale ' num2str(scale)]);
xlabel(['Wavelet 1 to ' num2str(size(wavelet_names,1)) ' for scale ' num2str(scale)]);
ylabel(['Wavelet 1 to ' num2str(size(wavelet_names,1)) ' for scale ' num2str(scale)]);
snapnow
print('-dpng','-r600',fullfile(OUTPUT_FOLDER, ['scalecorr' num2str(scale) '.png']))
end
Correlation for each wavelet between different scales (1 to 100):
Code used:
%% PRE-REQUISITE: You need to download http://www.mathworks.com/matlabcentral/fileexchange/24253-customizable-heat-maps , which gives the function heatmap()
%% Benchmark parameters
scales = 1:100;
number_of_random_samples = 1000000;
% wavelet_name = 'gaus2';
% wavelet_names = {'sym2', 'sym3'}%, 'sym4'};
OUTPUT_FOLDER = 'output_corr';
wavelet_names = get_all_wavelet_names(); % WARNING: you need to remove all complex wavelets, viz. cgau, shan, fbsp and cmor, otherwise heatmap() will fail on the complex-valued results.
%% Prepare data
random_signal = rand(1,number_of_random_samples);
results = zeros(size(scales,2), number_of_random_samples);
%% Prepare result folder
if ~exist(OUTPUT_FOLDER, 'dir')
mkdir(OUTPUT_FOLDER);
end
%% Run benchmarks
for wavelet_number = 1:size(wavelet_names,1)
wavelet_name = wavelet_names{wavelet_number}
run_time_random_test_scales = zeros(size(scales,2),1);
for scale_number = 1:size(scales,2)
scale = scales(scale_number);
run = 1;
% Compute wavelet on a random signal
results(scale_number, :) = cwt(random_signal(run, :),scale,wavelet_name);
end
correlation_results = corrcoef(results')
heatmap(correlation_results, [], [], '%0.2f', 'MinColorValue', -1.0, 'MaxColorValue', 1.0, 'Colormap', 'jet',...
'Colorbar', true, 'ColorLevels', 64, 'UseFigureColormap', false);
title(['Correlation matrix for wavelet ' wavelet_name]);
xlabel(['Scales 1 to ' num2str(max(scales)) ' for wavelet ' wavelet_name]);
ylabel(['Scales 1 to ' num2str(max(scales)) ' for wavelet ' wavelet_name]);
snapnow
print('-dpng','-r600',fullfile(OUTPUT_FOLDER, [wavelet_name '_scalecorr_scale1to' num2str(max(scales)) '.png']))
end

Issues with imgIdx in DescriptorMatcher mexopencv

My idea is simple here. I am using mexopencv and trying to see whether there is any object present in my current frame that matches any image stored in my database. I am using the OpenCV DescriptorMatcher to train on my images.
Here is a snippet I wish to build on top of. It does one-to-one image matching using mexopencv, and it can also be extended to an image stream.
function hello
detector = cv.FeatureDetector('ORB');
extractor = cv.DescriptorExtractor('ORB');
matcher = cv.DescriptorMatcher('BruteForce-Hamming');
train = [];
for i=1:3
train(i).img = [];
train(i).points = [];
train(i).features = [];
end;
train(1).img = imread('D:\test\1.jpg');
train(2).img = imread('D:\test\2.png');
train(3).img = imread('D:\test\3.jpg');
for i=1:3
frameImage = train(i).img;
framePoints = detector.detect(frameImage);
frameFeatures = extractor.compute(frameImage , framePoints);
train(i).points = framePoints;
train(i).features = frameFeatures;
end;
for i = 1:3
boxfeatures = train(i).features;
matcher.add(boxfeatures);
end;
matcher.train();
camera = cv.VideoCapture;
pause(3);%Sometimes necessary
window = figure('KeyPressFcn',@(obj,evt)setappdata(obj,'flag',true));
setappdata(window,'flag',false);
while(true)
sceneImage = camera.read;
sceneImage = rgb2gray(sceneImage);
scenePoints = detector.detect(sceneImage);
sceneFeatures = extractor.compute(sceneImage,scenePoints);
m = matcher.match(sceneFeatures);
%{
%Comments in
img_no = m.imgIdx;
img_no = img_no(1);
%I am planning to do this based on the fact that
%on a perfect match imgIdx a 1xN will be filled
%with the index of the training
%example 1,2 or 3
objPoints = train(img_no+1).points;
boxImage = train(img_no+1).img;
ptsScene = cat(1,scenePoints([m.queryIdx]+1).pt);
ptsScene = num2cell(ptsScene,2);
ptsObj = cat(1,objPoints([m.trainIdx]+1).pt);
ptsObj = num2cell(ptsObj,2);
%This is where the problem starts here, assuming the
%above is correct , Matlab yells this at me
%index exceeds matrix dimensions.
[H,inliers] = cv.findHomography(ptsScene,ptsObj,'Method','Ransac');
m = m(inliers);
imgMatches = cv.drawMatches(sceneImage,scenePoints,boxImage,boxPoints,m,...
'NotDrawSinglePoints',true);
imshow(imgMatches);
%Comment out
%}
flag = getappdata(window,'flag');
if isempty(flag) || flag, break; end
pause(0.0001);
end
Now the issue here is that imgIdx is a 1xN matrix, and it contains indices into the different training images, which is obvious. Only on a perfect match is imgIdx completely filled with the matched image's index. So how do I use this matrix to pick the right image index? Also, in these two lines I get an "index exceeds matrix dimensions" error:
ptsObj = cat(1,objPoints([m.trainIdx]+1).pt);
ptsObj = num2cell(ptsObj,2);
This is obvious, since while debugging I saw clearly that the size of m.trainIdx is greater than that of objPoints, i.e. I am accessing points which I should not, hence the index exceeds the matrix dimensions.
There is scant documentation on the use of imgIdx, so I need help from anybody who has knowledge of this subject.
These are the images I used.
Image1
Image2
Image3
1st update after @Amro's response:
With the ratio of min distance to distance at 3.6, I get the following response.
With the ratio of min distance to distance at 1.6, I get the following response.
I think it is easier to explain with code, so here it goes :)
%% init
detector = cv.FeatureDetector('ORB');
extractor = cv.DescriptorExtractor('ORB');
matcher = cv.DescriptorMatcher('BruteForce-Hamming');
urls = {
'http://i.imgur.com/8Pz4M9q.jpg?1'
'http://i.imgur.com/1aZj0MI.png?1'
'http://i.imgur.com/pYepuzd.jpg?1'
};
N = numel(urls);
train = struct('img',cell(N,1), 'pts',cell(N,1), 'feat',cell(N,1));
%% training
for i=1:N
% read image
train(i).img = imread(urls{i});
if ~ismatrix(train(i).img)
train(i).img = rgb2gray(train(i).img);
end
% extract keypoints and compute features
train(i).pts = detector.detect(train(i).img);
train(i).feat = extractor.compute(train(i).img, train(i).pts);
% add to training set to match against
matcher.add(train(i).feat);
end
% build index
matcher.train();
%% testing
% lets create a distorted query image from one of the training images
% (rotation+shear transformations)
t = -pi/3; % -60 degrees angle
tform = [cos(t) -sin(t) 0; 0.5*sin(t) cos(t) 0; 0 0 1];
img = imwarp(train(3).img, affine2d(tform)); % try all three images here!
% detect fetures in query image
pts = detector.detect(img);
feat = extractor.compute(img, pts);
% match against training images
m = matcher.match(feat);
% keep only good matches
%hist([m.distance])
m = m([m.distance] < 3.6*min([m.distance]));
% sort by distances, and keep at most the first/best 200 matches
[~,ord] = sort([m.distance]);
m = m(ord);
m = m(1:min(200,numel(m)));
% naive classification (majority vote)
tabulate([m.imgIdx]) % how many matches each training image received
idx = mode([m.imgIdx]);
% matches with keypoints belonging to chosen training image
mm = m([m.imgIdx] == idx);
% estimate homography (used to locate object in query image)
ptsQuery = num2cell(cat(1, pts([mm.queryIdx]+1).pt), 2);
ptsTrain = num2cell(cat(1, train(idx+1).pts([mm.trainIdx]+1).pt), 2);
[H,inliers] = cv.findHomography(ptsTrain, ptsQuery, 'Method','Ransac');
% show final matches
imgMatches = cv.drawMatches(img, pts, ...
train(idx+1).img, train(idx+1).pts, ...
mm(logical(inliers)), 'NotDrawSinglePoints',true);
% apply the homography to the corner points of the training image
[h,w] = size(train(idx+1).img);
corners = permute([0 0; w 0; w h; 0 h], [3 1 2]);
p = cv.perspectiveTransform(corners, H);
p = permute(p, [2 3 1]);
% show where the training object is located in the query image
opts = {'Color',[0 255 0], 'Thickness',4};
imgMatches = cv.line(imgMatches, p(1,:), p(2,:), opts{:});
imgMatches = cv.line(imgMatches, p(2,:), p(3,:), opts{:});
imgMatches = cv.line(imgMatches, p(3,:), p(4,:), opts{:});
imgMatches = cv.line(imgMatches, p(4,:), p(1,:), opts{:});
imshow(imgMatches)
The result:
Note that since you did not post any test images (in your code you take input from the webcam), I created one by distorting one of the training images and using it as a query image. I am using functions from certain MATLAB toolboxes (imwarp and such), but those are non-essential to the demo and you could replace them with equivalent OpenCV ones; a rough sketch follows.
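For example, the imwarp/affine2d distortion above could be reproduced with mexopencv's cv.warpAffine; a rough, untested equivalent (OpenCV expects a 2x3 matrix in the transposed, column-vector convention, and by default keeps the source image size rather than expanding it):
t = -pi/3;                                    % -60 degrees, as above
tform = [cos(t) -sin(t) 0; 0.5*sin(t) cos(t) 0; 0 0 1];
M = tform';                                   % affine2d row-vector convention -> OpenCV convention
img = cv.warpAffine(train(3).img, M(1:2,:));  % 2x3 forward map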
I must say that this approach is not the most robust one. Consider using other techniques such as the bag-of-words model, which OpenCV already implements.

MATLAB: One Step Ahead Neural Network Timeseries Forecast

Intro: I'm using MATLAB's Neural Network Toolbox in an attempt to forecast time series one step into the future. Currently I'm just trying to forecast a simple sinusoidal function, but hopefully I will be able to move on to something a bit more complex after I obtain satisfactory results.
Problem: Everything seems to work fine, however the predicted forecast tends to be lagged by one period. Neural network forecasting isn't much use if it just outputs the series delayed by one unit of time, right?
Code:
t = -50:0.2:100;
noise = rand(1,length(t));
y = sin(t)+1/2*sin(t+pi/3);
split = floor(0.9*length(t));
forperiod = length(t)-split;
numinputs = 5;
forecasted = [];
msg = '';
for j = 1:forperiod
fprintf(repmat('\b',1,numel(msg)));
msg = sprintf('forecasting iteration %g/%g...\n',j,forperiod);
fprintf('%s',msg);
estdata = y(1:split+j-1);
estdatalen = size(estdata,2);
signal = estdata;
last = signal(end);
[signal,low,high] = preprocess(signal'); % pre-process
signal = signal';
inputs = signal(rowshiftmat(length(signal),numinputs));
targets = signal(numinputs+1:end);
%% NARNET METHOD
feedbackDelays = 1:4;
hiddenLayerSize = 10;
net = narnet(feedbackDelays,[hiddenLayerSize hiddenLayerSize]);
net.inputs{1}.processFcns = {'removeconstantrows','mapminmax'};
signalcells = mat2cell(signal,[1],ones(1,length(signal)));
[inputs,inputStates,layerStates,targets] = preparets(net,{},{},signalcells);
net.trainParam.showWindow = false;
net.trainparam.showCommandLine = false;
net.trainFcn = 'trainlm'; % Levenberg-Marquardt
net.performFcn = 'mse'; % Mean squared error
[net,tr] = train(net,inputs,targets,inputStates,layerStates);
next = net(inputs(end),inputStates,layerStates);
next = postprocess(next{1}, low, high); % post-process
next = (next+1)*last;
forecasted = [forecasted next];
end
figure(1);
plot(1:forperiod, forecasted, 'b', 1:forperiod, y(end-forperiod+1:end), 'r');
grid on;
Note:
The function 'preprocess' simply converts the data into logged % differences and 'postprocess' converts the logged % differences back for plotting. (Check EDIT for preprocess and postprocess code)
Results:
BLUE: Forecasted Values
RED: Actual Values
Can anyone tell me what I'm doing wrong here? Or perhaps recommend another method to achieve the desired results (lagless prediction of sinusoidal function, and eventually more chaotic timeseries)? Your help is very much appreciated.
EDIT:
It's been a few days now and I hope everyone has enjoyed their weekend. Since no solutions have emerged, I've decided to post the code for the helper functions 'postprocess.m', 'preprocess.m', and their helper function 'normalize.m'. Maybe this will help get the ball rolling.
postprocess.m:
function data = postprocess(x, low, high)
% denormalize
logdata = (x+1)/2*(high-low)+low;
% inverse log data
sign = logdata./abs(logdata);
data = sign.*(exp(abs(logdata))-1);
end
preprocess.m:
function [y, low, high] = preprocess(x)
% differencing
diffs = diff(x);
% calc % changes
chngs = diffs./x(1:end-1,:);
% log data
sign = chngs./abs(chngs);
logdata = sign.*log(abs(chngs)+1);
% normalize logrets
high = max(max(logdata));
low = min(min(logdata));
y=[];
for i = 1:size(logdata,2)
y = [y normalize(logdata(:,i), -1, 1)];
end
end
normalize.m:
function Y = normalize(X,low,high)
%NORMALIZE Linear normalization of X between low and high values.
if length(X) <= 1
error('Length of X input vector must be greater than 1.');
end
mi = min(X);
ma = max(X);
Y = (X-mi)/(ma-mi)*(high-low)+low;
end
I didn't check your code, but I made a similar test to predict sin() with a NN. The result seems reasonable, without a lag. I think your bug is somewhere in the synchronization of the predicted values with the actual values.
Here is the code:
%% init & params
t = (-50 : 0.2 : 100)';
y = sin(t) + 0.5 * sin(t + pi / 3);
sigma = 0.2;
n_lags = 12;
hidden_layer_size = 15;
%% create net
net = fitnet(hidden_layer_size);
%% train
noise = sigma * randn(size(t));
y_train = y + noise;
out = circshift(y_train, -1);
out(end) = nan;
in = lagged_input(y_train, n_lags);
net = train(net, in', out');
%% test
noise = sigma * randn(size(t)); % new noise
y_test = y + noise;
in_test = lagged_input(y_test, n_lags);
out_test = net(in_test')';
y_test_predicted = circshift(out_test, 1); % sync with actual value
y_test_predicted(1) = nan;
%% plot
figure,
plot(t, [y, y_test, y_test_predicted], 'linewidth', 2);
grid minor; legend('orig', 'noised', 'predicted')
and the lagged_input() function:
function in = lagged_input(in, n_lags)
for k = 2 : n_lags
in = cat(2, in, circshift(in(:, end), 1));
in(1, k) = nan;
end
end