Below is the code for the convolution of two continuous functions. The function fx is the square root of a Gaussian pdf. The convolution is computed in two ways: one uses the built-in conv() function, the other evaluates the definition of the convolution directly. mat_conv1 and cont_conv1 convolve two scaled copies of fx using the built-in and the explicit method, respectively; mat_conv2 and cont_conv2 then convolve two scaled copies of the first-stage results in the same way. You can see the code below.
clear all
clc
a = 0;
b = 1;
c1 = 0.1;
c2 = 0.2;
d1 = 0.3;
d2 = 0.4;
ymin = -2;
ymax = 2;
ynum = 400;
ylist = linspace(ymin,ymax,ynum);
conv_1 = mat_conv2(ylist,d1,d2,c1,c2,a,b);
conv_2 = cont_conv2(ylist,d1,d2,c1,c2,a,b);
hold on
plot(ylist,conv_1,'DisplayName','conv()');
plot(ylist,conv_2','DisplayName','continuous');
legend('-DynamicLegend');
function res = fx(x,a,b)
% square root of the normal pdf with mean a and standard deviation b
res = sqrt(normpdf(x,a,b));
end
function res = mat_conv1(y,c1,c2,a,b)
% discrete convolution of two scaled copies of fx via conv();
% multiplying by the grid spacing dy approximates the integral
dy = y(2)-y(1);
gx_1 = fx(y/c1,a,b);
gx_2 = fx(y/c2,a,b);
res = (1/(c1*c2))*conv(gx_1,gx_2,'same')*dy;
end
function res = mat_conv2(y,d1,d2,c1,c2,a,b)
% second stage: convolve two scaled copies of the first-stage result
dy = y(2)-y(1);
hx_1 = mat_conv1(y/d1,c1,c2,a,b);
hx_2 = mat_conv1(y/d2,c1,c2,a,b);
res = (1/(d1*d2))*conv(hx_1,hx_2,'same')*dy;
end
function res = cont_conv1(y,c1,c2,a,b)
% direct evaluation of the convolution integral with trapz
xx = -2:0.01:2;
gx_1 = fx(xx/c1,a,b);   % independent of y(i), so computed once
res = zeros(size(y));
for i = 1:length(y)
gx_2 = fx((y(i)-xx)/c2,a,b);
res(i) = (1/(c1*c2))*trapz(xx,gx_1.*gx_2);
end
end
function res = cont_conv2(y,d1,d2,c1,c2,a,b)
% second stage, again via the defining integral
xx = -2:0.01:2;
hx_1 = cont_conv1(xx/d1,c1,c2,a,b);   % independent of y(i), so computed once
res = zeros(size(y));
for i = 1:length(y)
hx_2 = cont_conv1((y(i)-xx)/d2,c1,c2,a,b);
res(i) = (1/(d1*d2))*trapz(xx,hx_1.*hx_2);
end
end
As you can see from the figure below, the results match pretty closely.
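That the two methods should agree can also be checked independently: conv() scaled by the grid spacing approximates the continuous convolution integral. A minimal sanity check (a sketch; normpdf needs the Statistics Toolbox, and two standard normal pdfs convolve to a normal pdf with standard deviation sqrt(2)):
t = linspace(-10,10,2001);
dt = t(2)-t(1);
h = conv(normpdf(t,0,1),normpdf(t,0,1),'same')*dt; % dt turns the discrete sum into an integral
max(abs(h - normpdf(t,0,sqrt(2)))) % should be very close to zero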
Now I only want to consider the positive domain. In other words, I want to take the convolution on the interval [0,2]. So I change ymin to zero, and inside the functions cont_conv1 and cont_conv2 I start the vector xx from zero. The code is:
The script is identical to the one above except for three lines. In the main script:
ymin = 0;
and inside both cont_conv1 and cont_conv2:
xx = 0:0.01:2;
The result is given below.
As you can see, the results are completely different. What am I doing wrong? How can I get rid of this discrepancy and use conv() to convolve the two functions on the positive domain?
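For reference, a quick way to narrow this down (a sketch using the modified functions above) is to compare the first-stage outputs directly on [0,2], which should show whether mat_conv1 and cont_conv1 already disagree before they are composed:
ylist = linspace(0,2,400);
g1 = mat_conv1(ylist,0.1,0.2,0,1);  % conv() version
g2 = cont_conv1(ylist,0.1,0.2,0,1); % integral version
figure; plot(ylist,g1,ylist,g2); legend('conv()','continuous');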
I am trying to find the optimal hyperparameters for my SVM model using a grid search, but it simply returns 1 for both hyperparameters.
function evaluations = inner_kfold_trainer(C,q,k,features_xy,labels)
features_xy_flds = kdivide(features_xy, k);
labels_flds = kdivide(labels, k);
evaluations = zeros(k,3);
for i = 1:k
fprintf('Fold %i of %i\n',i,k);
train_data = cell2mat(features_xy_flds(1:end ~= i));
train_labels = cell2mat(labels_flds(1:end ~= i));
test_data = cell2mat(features_xy_flds(i));
test_labels = cell2mat(labels_flds(i));
%AU1
train_labels = train_labels(:,1);
test_labels = test_labels(:,1);
%train
sv = fitcsvm(train_data,train_labels, 'KernelFunction','polynomial', 'PolynomialOrder',q,'BoxConstraint',C);
%Calculate evaluative measures
sv_predictions = sv.predict(test_data);
[precision,recall,F1] = evaluation(sv_predictions,test_labels);
evaluations(i,1) = precision;
evaluations(i,2) = recall;
evaluations(i,3) = F1;
end
save('eval.mat', 'evaluations');
end
The above is the inner k-fold cross-validation function. Below is the grid-search function, where something seems to be going wrong:
function [q,C] = grid_search(features_xy,labels,k)
% n x n grid
n = 3;
q_grid = linspace(1,19,n);
C_grid = linspace(1,59,n);
tic
evals = zeros(n,n,3);
for i = 1:n
for j = 1:n
fprintf('## i=%i, j=%i ##\n', i, j);
svm_results = inner_kfold_trainer(C_grid(i), q_grid(j),k,features_xy,labels);
evals(i,j,:) = mean(svm_results(:,:));
% precision only
%evals(i,j,:) = max(svm_results(:,1));
toc
end
end
f = evals;
% retrieving the best value of the hyper parameters, to use in the outer
% fold
[M1,I1] = max(f);
[~,I2] = max(M1(1,1,:));
index = I1(:,:,I2);
C = C_grid(index(1))
q = q_grid(index(2))
end
When I run grid_search(features_xy,labels,8), for example, I get C=1 and q=1 for any value of k (the number of folds). Also, features_xy is a 500x98 matrix.
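For reference, the usual pattern for recovering the maximising (i,j) pair from a single n-by-n criterion matrix (a sketch; reducing evals to its mean-F1 slice is an assumption about which criterion to use):
scoreF1 = evals(:,:,3);     % n x n matrix of mean F1 scores
[~, idx] = max(scoreF1(:)); % linear index of the best cell
[i_best, j_best] = ind2sub(size(scoreF1), idx);
C = C_grid(i_best);         % i indexed C_grid in the loops above
q = q_grid(j_best);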
I have implemented three functions for neural network regression:
1) a forward propagation function that, given the training inputs and the net structure, calculates the predicted output
function [y_predicted] = forwardProp(Theta,Baias,Inputs,NumberOfLayers,RegressionSwitch)
y_predicted = zeros(size(Theta{end},2), size(Inputs{1},2)); % preallocate: one column per sample
for i = 1:size(Inputs{1},2)
Activation = (Inputs{1}(:,i))';
for j = 2:NumberOfLayers - RegressionSwitch
Activation = 1./(1+exp(-(Activation*Theta{j-1} + Baias{j-1}))); % sigmoid hidden layers
end
if RegressionSwitch == 1
y_predicted(:,i) = Activation*Theta{end} + Baias{end}; % linear output layer for regression
else
y_predicted(:,i) = Activation;
end
end
end
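To make the expected shapes concrete, here is a minimal call (the layer sizes 3 -> 4 -> 2 and the five random samples are toy choices, not from the script below):
NumberOfLayers = 3; RegressionSwitch = 1;
Theta = {rand(3,4), rand(4,2)}; Baias = {rand(1,4), rand(1,2)};
Inputs{1} = rand(3,5); % 5 samples with 3 features each
yp = forwardProp(Theta,Baias,Inputs,NumberOfLayers,RegressionSwitch);
size(yp) % expect [2 5]: one 2-element prediction per sample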
2) a cost function that, given the predicted and the desired output, calculates the cost of the network
function [Cost] = costFunction(y_predicted, y, Theta, Baias, Lambda)
% squared error summed over outputs, averaged over samples
Cost = 0;
for j = 1:size(y,2)
for i = 1:size(y,1)
Cost = Cost +(((y(i,j) - y_predicted(i,j))^2)/size(y,2));
end
end
% L2 regularisation over all weights and biases
Reg = 0;
for i = 1:size(Theta, 2)
for j = 1:size(Theta{i}, 1)
for k = 1:size(Theta{i}, 2)
Reg = Reg + (Theta{i}(j,k))^2;
end
end
end
for i = 1:size(Baias, 2)
for j = 1:length(Baias{i})
Reg = Reg + (Baias{i}(j))^2;
end
end
Cost = Cost + (Lambda/(2*size(y,2)))*Reg;
end
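A quick sanity check of this cost (toy values): a perfect prediction with Lambda = 0 must give exactly 0, and a constant error of 1 on a 2-by-3 target gives 6*(1^2)/3 = 2:
y = [1 2 3; 4 5 6];
Theta = {rand(3,2)}; Baias = {rand(1,2)}; % irrelevant here since Lambda = 0
costFunction(y, y, Theta, Baias, 0)   % expect 0
costFunction(y+1, y, Theta, Baias, 0) % expect 2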
3) a back propagation function that estimates the partial derivative of the cost function with respect to each weight in the network by central finite differences
function [dTheta, dBaias] = Deltas(Theta,Baias,Inputs,NumberOfLayers,RegressionSwitch, Epsilon, Lambda, y)
% central-difference estimate of dCost/dTheta for every weight
for i = 1:size(Theta,2)
for j = 1:size(Theta{i},1)
for k = 1:size(Theta{i},2)
dTp = Theta;
dTm = Theta;
dTp{i}(j,k) = dTp{i}(j,k) + Epsilon;
dTm{i}(j,k) = dTm{i}(j,k) - Epsilon;
y_predicted_p = forwardProp(dTp,Baias,Inputs,NumberOfLayers,RegressionSwitch);
y_predicted_m = forwardProp(dTm,Baias,Inputs,NumberOfLayers,RegressionSwitch);
Cost_p = costFunction(y_predicted_p, y, dTp, Baias, Lambda);
Cost_m = costFunction(y_predicted_m, y, dTm, Baias, Lambda);
dTheta{i}(j,k) = (Cost_p - Cost_m)/(2*Epsilon);
end
end
end
for i = 1:size(Baias,2)
for j = 1:length(Baias{i})
dBp = Baias;
dBm = Baias;
dBp{i}(j) = dTp{i}(j) + Epsilon;
dBm{i}(j) = dTm{i}(j) - Epsilon;
y_predicted_p = forwardProp(Theta,dBp,Inputs,NumberOfLayers,RegressionSwitch);
y_predicted_m = forwardProp(Theta,dBm,Inputs,NumberOfLayers,RegressionSwitch);
Cost_p = costFunction(y_predicted_p, y, Theta, dBp, Lambda);
Cost_m = costFunction(y_predicted_m, y, Theta, dBm, Lambda);
dBaias{i}(j) = (Cost_p - Cost_m)/(2*Epsilon);
end
end
end
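The central-difference formula itself is easy to validate in isolation (toy example: f(x) = x^2 has f'(3) = 6):
Epsilon = 1e-5;
f = @(x) x.^2;
(f(3+Epsilon) - f(3-Epsilon))/(2*Epsilon) % ~6, accurate to O(Epsilon^2)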
I train the neural network with data from an exact mathematical function of the inputs. The gradient descent seems to work, as the cost decreases each iteration, but when I test the trained network the regression is terrible. The functions are not meant to be efficient, but they should work, so I am really frustrated to see that they don't... The main function and the data are OK, so the problem should be here. Can you please help me to spot it? Here is the "main":
clear;
clc;
Nodes_X = 5;
Training_Data = 1000;
x = rand(Nodes_X, Training_Data)*3;
y = zeros(2,Training_Data);
for i = 1:Training_Data
y(1,i) = (x(1,i)^2)+x(2,i)-x(3,i)+2*x(4,i)/x(5,i);
y(2,i) = (x(5,i)^2)+x(2,i)-x(3,i)+2*x(4,i)/x(1,i);
end
vx = rand(Nodes_X, Training_Data)*3;
vy = zeros(2,Training_Data);
for i = 1:Training_Data
vy(1,i) = (vx(1,i)^2)+vx(2,i)-vx(3,i)+2*vx(4,i)/vx(5,i);
vy(2,i) = (vx(5,i)^2)+vx(2,i)-vx(3,i)+2*vx(4,i)/vx(1,i);
end
%%%%%%%%%%%%%%%%%%%%%%ASSIGN NODES TO EACH LAYER%%%%%%%%%%%%%%%%%%%%%%%%%%%
NumberOfLayers = 4;
Nodes(1) = 5;
Nodes(2) = 10;
Nodes(3) = 10;
Nodes(4) = 2;
if length(Nodes) ~= NumberOfLayers || (Nodes(1)) ~= size(x, 1)
WARNING = msgbox('Nodes assigned incorrectly!');
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%INITIALIZATION%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for i = 1:NumberOfLayers-1
Theta{i} = rand(Nodes(i),Nodes(i+1));
Baias{i} = rand(1,Nodes(i+1));
end
Inputs{1} = x;
Outputs{1} = y;
RegressionSwitch = 1;
Lambda = 10;
Epsilon = 0.00001;
Alpha = 0.01;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%TRAINING%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Epoch = 0;
figure;
hold on;
while Epoch <=20
%%%%%%%%%%%%%%%%%%%%FORWARD PROPAGATION%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
y_predicted = forwardProp(Theta,Baias,Inputs,NumberOfLayers,RegressionSwitch);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%COST%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Cost = costFunction(y_predicted, y, Theta, Baias, Lambda);
scatter(Epoch,Cost);
pause(0.01);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%BACK PROPAGATION%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
[dTheta, dBaias] = Deltas(Theta,Baias,Inputs,NumberOfLayers,RegressionSwitch, Epsilon, Lambda, y);
%%%%%%%%%%%%%%%GRADIENT DESCENT%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for i = 1:size(Theta,2)
Theta{i} = Theta{i}-Alpha*dTheta{i};
end
for i = 1:size(Baias,2)
Baias{i} = Baias{i}-Alpha*dBaias{i};
end
Epoch = Epoch + 1;
end
hold off;
V_Inputs{1} = vx;
V_y_predicted = forwardProp(Theta,Baias,V_Inputs,NumberOfLayers,RegressionSwitch);
figure;
hold on;
for i = 1:size(vy,2)
scatter(vy(1,i),V_y_predicted(1,i));
pause(0.01);
end
hold off;
figure;
hold on;
for i = 1:size(vy,2)
scatter(vy(2,i),V_y_predicted(2,i));
pause(0.01);
end
hold off;
function Test()
a = 2;
b = 1;
c = 0.5;
q = 0.001;
r = 10;
function F = Useful(x) %calculates existing values for x with size 11
eq1 = (1*(0.903*x(2))^(-1))-(0.903*x(1));
eq2 = (1*(0.665*x(3))*(0.903*x(2))^(-1))-0.903*x(4);
eq3 = (1*(0.399*x(5))*(0.903*x(2)))-0.665*x(6);
eq4 = (1*(0.399*x(5))*(0.903*x(2))^2)-0.903*x(7);
eq5 = (1*(0.399*x(5))*(0.903*x(2))^3)-1*x(8);
eq6 = (1*(0.665*x(3))*(0.399*x(5))*(0.903*x(2)))-1*x(9);
eq7 = (1*(0.665*x(3))*(0.399*x(5))*(0.903*x(2))^2)-0.903*x(10);
eq8 = (1*(0.665*x(3))*(0.399*x(5)))-0.903*x(11);
eq9 = x(3)+x(4)+x(9)+x(10)+x(11)-a;
eq10 = x(5)+x(6)+x(7)+x(8)+x(9)+x(10)+x(11)-b;
eq11 = x(2)+x(6)+2*x(7)+3*x(8)+x(9)+2*x(10)-x(1)-x(4)-c;
F = [eq1;eq2;eq3;eq4;eq5;eq6;eq7;eq8; eq9; eq10; eq11];
end
Value(1,1) = 0;
for d = 2:100
x = fsolve(@Useful,x0,options); %Produces the x(1) to x(11) values
Value(1,d) = (x(3)+x(5))*d+Value(1,d-1); %Gives a new value after each iteration
a = a-x(3);
b = b-x(5);
c = c-x(2);
end
function Zdot = rhs(t,z) %z = (e1,e2,e3,e4,e5)
Zdot=zeros(5,1);
Zdot(1) = -1*z(1);
Zdot(2) = 1*z(1);
Zdot(3) = 1*z(1) - 1*z(2)*z(3);
Zdot(4) = 1*1*z(1) - Value(1,100)*H(z(3))*z(4)*z(4);
Zdot(5) = Value(1,100)*H(z(3))*(z(4));
end
function hill = H(x)
hill = q/(q+x^r);
end
[T,Y] = ode15s(@rhs, [0, 120], [1, 0, 1, 0, 0]); %Solve the ODE system, giving z(1) to z(5)
plot(T,Y(:,5))
end
I'm wondering: is it possible to pass each Value obtained (Value(1), Value(2), and so on) into "function Zdot", or can only the final value be passed on? Essentially, is it possible to implement this:
function Zdot = rhs(t,z) %z = (e1,e2,e3,e4,e5)
Zdot=zeros(5,1);
Zdot(1) = -1*z(1);
Zdot(2) = 1*z(1);
Zdot(3) = 1*z(1) - 1*z(2)*z(3);
Zdot(4) = 1*1*z(1) - Value(1,d)*H(z(3))*z(4)*z(4);
Zdot(5) = Value(1,d)*H(z(3))*(z(4));
end
Any insights would be much appreciated and I would be extremely grateful. Thank you in advance!
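Two observations that may help. First, because rhs is nested inside Test, it already shares Test's workspace, so the whole Value array is visible there; the question is only which element to use. Second, to pass one specific element in explicitly, the standard pattern is to bind it through an anonymous function (a sketch; rhs_param and the index d are hypothetical names, and rhs_param must stay nested in Test so it can still see H):
Vd = Value(1,d); % element from iteration d
[T,Y] = ode15s(@(t,z) rhs_param(t,z,Vd), [0, 120], [1, 0, 1, 0, 0]);
function Zdot = rhs_param(t,z,V) % same equations as rhs, with V passed in
Zdot = zeros(5,1);
Zdot(1) = -1*z(1);
Zdot(2) = 1*z(1);
Zdot(3) = 1*z(1) - 1*z(2)*z(3);
Zdot(4) = 1*1*z(1) - V*H(z(3))*z(4)*z(4);
Zdot(5) = V*H(z(3))*(z(4));
end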
This is my approximate entropy calculator in MATLAB: https://en.wikipedia.org/wiki/Approximate_entropy
I'm not sure why it isn't working; it's returning a negative value. Can anyone help me with this? R1 is the data.
FindSize = size(R1);
N = FindSize(1);
% N = input('insert number of data values');
% if you want to put your own N in, take away the % from the line above and
% insert the % before the N = FindSize(1)
% m = input('insert m: integer representing length of data, embedding dimension');
m = 2;
% r = input('insert r: positive real number for filtering, threshold');
r = 0.2*std(R1);
for x1= R1(1:N-m+1,1)
D1 = pdist2(x1,x1);
C11 = (D1 <= r)/(N-m+1);
c1 = C11(1);
end
for i1 = 1:N-m+1
s1 = sum(log(c1));
end
phi1 = (s1/(N-m+1));
for x2= R1(1:N-m+2,1)
D2 = pdist2(x2,x2);
C21 = (D2 <= r)/(N-m+2);
c2 = C21(1);
end
for i2 = 1:N-m+2
s2 = sum(log(c2));
end
phi2 = (s2/(N-m+2));
Ap = phi1 - phi2;
Apen = Ap(1)
Following the documentation provided by the Wikipedia article, I developed this small function that calculates the approximate entropy:
function res = approximate_entropy(U,m,r)
N = numel(U);
res = zeros(1,2);
for i = [1 2]
off = m + i - 1;       % window length: m, then m+1
off_N = N - off;
off_N1 = off_N + 1;
x = zeros(off_N1,off); % embedding vectors, one per row
for j = 1:off
x(:,j) = U(j:off_N+j);
end
C = zeros(off_N1,1);
for j = 1:off_N1
dist = abs(x - repmat(x(j,:),off_N1,1));
C(j) = sum(~any((dist > r),2)) / off_N1; % fraction of vectors within Chebyshev distance r
end
res(i) = sum(log(C)) / off_N1; % phi_m, then phi_{m+1}
end
res = res(1) - res(2);         % ApEn = phi_m - phi_{m+1}
end
I first tried to replicate the computation shown in the article, and the result I obtain matches the result shown in the example:
U = repmat([85 80 89],1,17);
approximate_entropy(U,2,3)
ans =
-1.09965411068114e-05
Then I created another example that shows a case in which approximate entropy produces a meaningful result (the entropy of the first sample is always less than that of the second one):
% starting variables...
s1 = repmat([10 20],1,10);
s1_m = mean(s1);
s1_s = std(s1);
s2_m = 0;
s2_s = 0;
% datasample will not always return a perfect M and S match,
% so let's repeat this until both the mean and the std match...
while ((s1_m ~= s2_m) || (s1_s ~= s2_s))
s2 = datasample([10 20],20,'Replace',true,'Weights',[0.5 0.5]);
s2_m = mean(s2);
s2_s = std(s2);
end
m = 2;
r = 3;
ae1 = approximate_entropy(s1,m,r)
ae2 = approximate_entropy(s2,m,r)
ae1 =
0.00138568170752751
ae2 =
0.680090884817465
Finally, I tried with your sample data:
fid = fopen('O1.txt','r');
U = cell2mat(textscan(fid,'%f'));
fclose(fid);
m = 2;
r = 0.2 * std(U);
approximate_entropy(U,m,r)
ans =
1.08567461184858
I am using the contourf function with a binary image. I am having trouble getting the area and centroid of the different surfaces in the image; I need these to classify the objects.
You need to use the contour matrix output C. Each contour is stored in C as a header column [level; number_of_points] followed by that many [x; y] columns, which is what the loop below walks through. Here is an example:
function data = ContourInfo(C)
data = [];
if isempty(C)
return
end
k = 1;
j = 1;
while j < size(C,2)
data(k).level = C(1,j); % contour level (header column)
data(k).numxy = C(2,j); % number of vertices in this contour
data(k).x = C(1,j+1:j+data(k).numxy);
data(k).y = C(2,j+1:j+data(k).numxy);
[data(k).centroid(1), data(k).centroid(2)] = polycentroid(data(k).x, data(k).y);
data(k).area = polyarea(data(k).x, data(k).y); % unsigned area
j = j + data(k).numxy + 1; % jump to the next contour header
k = k+1;
end
function [x0,y0,a] = polycentroid(x,y)
% polygon centroid (x0,y0) and signed area a via the shoelace formula
n = length(x);
x = x(:); y = y(:);
x2 = [x(2:n);x(1)];
y2 = [y(2:n);y(1)];
a = 1/2*sum(x.*y2 - x2.*y);
x0 = 1/6*sum((x.*y2 - x2.*y).*(x + x2))/a;
y0 = 1/6*sum((x.*y2 - x2.*y).*(y + y2))/a;
Call it as follows:
Z = peaks(20);
[C, h] = contourf(Z,10);
contourData = ContourInfo(C)
disp('Area of contour 1:');
disp(contourData(1).area);
disp('Centroid of contour 1:');
disp(contourData(1).centroid);
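To collect the values for all contours at once (a small usage sketch):
areas = [contourData.area];                % 1 x k vector of areas
centroids = vertcat(contourData.centroid); % k x 2 matrix of [x0 y0]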