I have a question concerning the numerical derivation of an ideal motion law using the OPTI Toolbox in MATLAB.
There are many constraints to satisfy, but the main principle of the target function is as follows:
This function is periodic, but only 1 period is used.
All variables are dimensionless and the number of points is n (which is an odd number).
At 0 and 1, the function yields 0.
In the middle, at (n+1)/2, the function yields e.g. 0.3.
The function needs to increase monotonically in the first half and decrease in the second half.
The velocity needs to be between -0.8 and 1.
For now, I want the peak value of the acceleration to be as low as possible; later this cost function will be changed, and the peak acceleration will be limited instead.
My attempt using the OPTI Toolbox looks like this:
clear all
close all
%n is number of grid points, only odd numbers allowed!
n=11;
% parameters
tijd=[0:(1/(n-1)):1];
velmin=-0.8;
velmax=1;
accmax=100;
accmin=-100;
%First order derivative:
%Last value doesn't contribute, because function is periodic
%Implementation using central differences: delta'_i = (delta_(i+1)-delta_(i-1))/(2*deltax)
%With deltax=(1/(n-1))
A_1=zeros(n-1,n);
A_1(2:n-1,1:n-2)=A_1(2:n-1,1:n-2)-eye(n-2);
A_1(1:n-2,2:n-1)=A_1(1:n-2,2:n-1)+eye(n-2);
A_1(1,n-1)=-1;
A_1(n-1,1)=1;
A_1=A_1*(n-1)/2;
%Lower and upper bounds + monotonically increasing and decreasing
lb_111=0.1*ones((n-1)/2,1);
ub_111=velmax*ones((n-1)/2,1);
lb_112=velmin*ones((n-1)/2,1);
ub_112=-0.1*ones((n-1)/2,1);
ub_1=[ub_111; ub_112];
lb_1=[lb_111; lb_112];
%2nd order derivative, same as before.
A_12=zeros(n-1,n);
A_12(2:n-1,1:n-2)=A_12(2:n-1,1:n-2)+eye(n-2);
A_12(1:n-2,2:n-1)=A_12(1:n-2,2:n-1)+eye(n-2);
A_12(1:n-1,1:n-1)=A_12(1:n-1,1:n-1)-2*eye(n-1);
A_12(1,n-1)=1;
A_12(n-1,1)=1;
A_12=A_12*((n-1)^2);
lb_12=accmin*ones(n-1,1);
ub_12=accmax*ones(n-1,1);
%First and last values are 0.
A_2=zeros(2,n);
A_2(1,1)=1;
A_2(2,n)=1;
lb_2=zeros(2,1);
ub_2=zeros(2,1);
%Middle value is known
A_4=zeros(1,n);
A_4(1,(n+1)/2)=1;
lb_4=zeros(1,1);
lb_4(1)=0.3;
ub_4=zeros(1,1);
ub_4(1)=0.3;
%Main derivatives
%1st
af_1=@(x) A_1*x;
%2nd
af_2=@(x) A_12*x;
%Will be needed later, not relevant.
% a_3 = zeros(n-1,n);
% a_3(1:(n-1)/2,(n+1)/2:n-1)=a_3(1:(n-1)/2,(n+1)/2:n-1)+eye((n-1)/2);
% a_3((n+1)/2:n-1,1:(n-1)/2)=a_3((n+1)/2:n-1,1:(n-1)/2)+eye((n-1)/2);
% a_fase=@(x) [a_3*x; x((n+1)/2)];
%Put all linear constraints together
A = [A_4;A_1;A_12;A_2];
lb = [lb_4;lb_1;lb_12;lb_2];
ub = [ub_4;ub_1;ub_12;ub_2];
%Cost function
f = @(x) norm(af_2(x),inf);
%Non linear function later
%nlcon = @(x) [norm(g(x),inf)];
%cl = [-15];
%cu = [15];
%Estimate x0
temp = fliplr(tijd);
x_0 = [tijd(1:(n+1)/2) temp(((n+1)/2+1):end)];
%Options in solver
opts = optiset('solver','ipopt','display','iter');
Opt = opti('fun',f,'lin',A,lb,ub,'x0',x_0,'options',opts);
%Extract solution
delta1 = solve(Opt);
%delta2=a_fase(delta1);
vel1=af_1(delta1);
%vel2=af_1(delta2);
acc1=af_2(delta1);
%acc2=af_2(delta2);
%Tas=g(delta1);
%%%%%%%%%%%%%%%%
%PLOTS
%%%%%%%%%%%%%%%%
figure
subplot(2,2,1);
plot(tijd,delta1)
title('positie') % position
subplot(2,2,2);
plot(tijd,[vel1;vel1(1)])
title('snelheid') % velocity
subplot(2,2,3);
plot(tijd,[acc1;acc1(1)])
title('acceleratie') % acceleration
subplot(2,2,4);
%plot(tijd,Tas)
title('Askoppel') % shaft torque
But this yields an infeasible problem.
Does anyone have an idea what I'm doing wrong?
I tried everything and looked everywhere, but I can't find any solution to my question.
clc
clear all
%% Solving the Ordinary Differential Equation
G = 6.67408e-11; %Gravitational constant
M = 10; %Mass of the fixed object
r = 1; %Distance between the objects
tspan = [0 100000]; %Time Progression from 0 to 100000s
conditions = [1;0]; %y0= 1m apart, v0=0 m/s
F=@(t,y)var_r(y,G,M,r);
[t,y]=ode45(F,tspan,conditions); %ODE solver algorithm
%%part1: Plotting the Graph
% plot(t,y(:,1)); %Plotting the Graph
% xlabel('time (s)')
% ylabel('distance (m)')
%% part2: Animation of Results
plot(0,0,'b.','MarkerSize', 40);
hold on %to keep the first graph
for i=1:length(t)
k = plot(y(i,1),0,'r.','MarkerSize', 12);
pause(0.05);
axis([-1 2 -2 2]) %Defining the Axis
xlabel('X-axis') %X-Axis Label
ylabel('Y-axis') %Y-Axis Label
delete(k)
end
function yd=var_r(y,G,M,r) %function of variable r
g = (G*M)/(r + y(1))^2;
yd = [y(2); -g];
end
This is the code where I'm trying to replace ode45 with the Runge-Kutta method, but it's giving me errors. My Runge-Kutta function:
function y = Runge_Kutta(f,x0,xf,y0,h)
n= (xf-x0)/h;
y=zeros(n+1,1);
x=(x0:h:xf);
y(1) = y0;
for i=1:n
k1 = f(x(i),y(i));
k2= f(x(i)+ h/2 , y(i) +h*(k1)/2);
y(i+1) = y(i)+(h*k2);
end
plot(x,y,'-.m')
legend('RKM')
title ('solution of y(x)');
xlabel('x');
ylabel('y(x)')
hold on
end
Before converting your ode45( ) solution to a manually written RK scheme, it doesn't even look like your ode45( ) solution is correct. It appears you have a gravitational problem set up where the initial velocity is 0, so a small object will simply fall into a large mass M on a line (rectilinear motion), and that is why you have scalar position and velocity.
Going with this assumption, r is something you should be calculating on the fly, not using as a fixed input to the derivative function. E.g., I would have expected something like this:
F=#(t,y)var_r(y,G,M); % get rid of r
:
function yd=var_r(y,G,M) % function of current position y(1) and velocity y(2)
g = (G*M)/y(1)^2; % gravity accel based on current position
yd = [y(2); -g]; % assumes y(1) is positive, so acceleration is negative
end
The small object must start with a positive initial position for the derivative code to be valid as you have it written. As the small object falls into the large mass M, the above will only hold until it hits the surface or atmosphere of M. Or if you model M as a point mass, then this scheme will become increasingly difficult to integrate correctly because the acceleration becomes large without bound as the small mass gets very close to the point mass M. You would definitely need a variable step size approach in this case. The solution becomes invalid if it goes "through" mass M. In fact, once the speed gets too large the whole setup becomes invalid because of relativistic effects.
Maybe you could explain in more detail if your system is supposed to be set up this way, and what the purpose of the integration is. If it is really supposed to be a 2D or 3D problem, then more states need to be added.
For your manual Runge-Kutta code, you completely forgot to integrate the velocity so this is going to fail miserably. You need to carry a 2-element state from step to step, not a scalar as you are currently doing. E.g., something like this:
y=zeros(2,n+1); % 2-element state as columns of the y variable
x=(x0:h:xf);
y(:,1) = y0; % initial state is the first 2-element column
% change all the scalar y(i) to column y(:,i)
for i=1:n
k1 = f(x(i),y(:,i));
k2= f(x(i)+ h/2 , y(:,i) +h*(k1)/2);
y(:,i+1) = y(:,i)+(h*k2);
end
plot(x,y(1,:),'-.m') % plot the position part of the solution
This is all assuming the f that gets passed in is the same F you have in your original code.
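Putting the pieces together, a minimal usage sketch (assuming var_r has been fixed as in the first answer and Runge_Kutta patched to carry the 2-element state; the end time and step size here are arbitrary assumptions, not values from the question):
G = 6.67408e-11; M = 10;              % values from the original question
F = @(t,y) var_r(y,G,M);              % the corrected derivative function
y0 = [1; 0];                          % initial position and velocity as a column
y = Runge_Kutta(F, 0, 100, y0, 0.01); % hypothetical call; 0.01 is an assumed step size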
y(1) is the first scalar element in the data structure of y (this counts in column-first order). You want to generate in y a list of column vectors, as your ODE is a system with state dimension 2. Thus you need to generate y with that format, y=zeros(length(y0),n+1); and then address the list entries as matrix columns, y(:,1)=y0, with the same modification in every place where you extract or assign a list entry.
MATLAB introduces various shortcuts that, if used consistently, lead to contradictions (I think the script-hater rant (German) is still valid in large parts). Essentially, unlike in other systems, MATLAB gives direct access to the underlying data structure of matrices. y(k) is the element of the underlying flat array (which is interpreted column-first in MATLAB, as in Fortran, unlike, e.g., NumPy, where it is row-first).
Only the two-index access is to the matrix with its dimensions. So y(:,k) is the k-th matrix column and y(k,:) the k-th matrix row. The single-index access is nice for row or column vectors, but leads immediately to problems when collecting such vectors in lists, as these lists are automatically matrices.
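A tiny example of the difference (a sketch illustrating the indexing rules described above):
y = zeros(2,3);  % a 2-by-3 matrix: three 2-element states as columns
y(:,1) = [1; 0]; % two-index access: assign the first column
y(2)             % single-index (column-first) access: this is y(2,1), i.e. 0
y(:,2)           % two-index access: the second column, a 2-element vector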
I have adapted the code in Comparing FFT of Function to Analytical FT Solution in Matlab for this question. I am trying to do FFTs and comparing the result with analytical expressions in the Wikipedia tables.
My code is:
a = 1.223;
fs = 1e5; %sampling frequency
dt = 1/fs;
t = 0:dt:30-dt; %time vector
L = length(t); % no. sample points
t = t - 0.5*max(t); %center around t=0
y = ; % original function in time
Y = dt*fftshift(abs(fft(y))); %numerical soln
freq = (-L/2:L/2-1)*fs/L; %freq vector
w = 2*pi*freq; % angular freq
F = ; %analytical solution
figure; subplot(1,2,1); hold on
plot(w,real(Y),'.')
plot(w,real(F),'-')
xlabel('Frequency, w')
title('real')
legend('numerical','analytic')
xlim([-5,5])
subplot(1,2,2); hold on;
plot(w,imag(Y),'.')
plot(w,imag(F),'-')
xlabel('Frequency, w')
title('imag')
legend('numerical','analytic')
xlim([-5,5])
If I study the Gaussian function and let
y = exp(-a*t.^2); % original function in time
F = exp(-w.^2/(4*a))*sqrt(pi/a); %analytical solution
in the above code, it looks like there is good agreement when the real and imaginary parts of the function are plotted:
But if I study a decaying exponential multiplied with a Heaviside function:
H = @(x)1*(x>0); % Heaviside function
y = exp(-a*t).*H(t);
F = 1./(a+1j*w); %analytical solution
then the plots show a clear discrepancy.
Why is there a discrepancy? I suspect it's related to the line Y = dt*fftshift(abs(fft(y))); but I'm not sure why or how.
Edit: I changed the ifftshift to fftshift in Y = dt*fftshift(abs(fft(y)));. Then I also removed the abs. The second graph now looks like:
What is the mathematical reason behind the 'mirrored' graph and how can I remove it?
The plots at the bottom of the question are not mirrored. If you plot those using lines instead of dots you'll see the numeric results have very high frequencies. The absolute component matches, but the phase doesn't. When this happens, it's almost certainly a case of a shift in the time domain.
And indeed, you define the time domain function with the origin in the middle. The FFT expects the origin to be at the first (leftmost) sample. This is what ifftshift is for:
Y = dt*fftshift(fft(ifftshift(y)));
ifftshift moves the origin to the first sample, in preparation for the fft call, and fftshift moves the origin of the result to the middle, for display.
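A small demonstration (a sketch with an odd-length vector, where the difference between fftshift and ifftshift matters):
x = [-2 -1 0 1 2];     % samples with the origin (the 0) in the middle
ifftshift(x)           % returns [0 1 2 -2 -1]: origin moved to the first sample
fftshift(ifftshift(x)) % returns x again: fftshift re-centers for display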
Edit
Your t does not have a 0:
>> t(L/2+(-1:2))
ans =
-1.5000e-05 -5.0000e-06 5.0000e-06 1.5000e-05
The sample at t(floor(L/2)+1) needs to be 0. That is the sample that ifftshift moves to the leftmost sample. (I use floor there in case L is odd in size, not the case here.)
To generate a correct t do as follows:
fs = 1e5; % sampling frequency
L = 30 * fs;
t = -floor(L/2):floor((L-1)/2);
t = t / fs;
I first generate an integer t axis of the right length, with 0 at the correct location (t(floor(L/2)+1)==0). Then I convert that to seconds by dividing by the sampling frequency.
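As a quick check (a sketch), the origin should land exactly where ifftshift expects it:
L = numel(t);
t(floor(L/2)+1) % should be exactly 0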
With this t, the Y as I suggest above, and the rest of your code as-is, I see this for the Gaussian example:
>> max(abs(F-Y))
ans = 4.5254e-16
For the other function I see larger differences, in the order of 6e-6. This is due to the inability to sample the Heaviside function. You need t=0 in your sampled function, but H doesn't have a value at 0. I noticed that the real component has an offset of similar magnitude, which is caused by the sample at t=0.
Typically, the sampled Heaviside function is set to 0.5 for t=0. If I do that, the offset is removed completely, and max difference for the real component is reduced by 3 orders of magnitude (largest errors happen for values very close to 0, where I see a zig-zag pattern). For the imaginary component, the max error is reduced to 3e-6, still quite large, and is maximal at high frequencies. I attribute these errors to the difference between the ideal and sampled Heaviside functions.
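For instance, the half-maximum convention could be implemented like this (a sketch, modifying the question's H):
H = @(x) (x > 0) + 0.5*(x == 0); % sampled Heaviside with H(0) = 0.5
y = exp(-a*t).*H(t);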
You should probably limit yourself to band-limited functions (or nearly band-limited ones such as the Gaussian). You might want to try to replace the Heaviside function with an error function (the integral of a Gaussian) with a small sigma (sigma = 0.8/fs, i.e. 0.8 sample periods, is the smallest sigma I would consider for proper sampling). Its Fourier transform is known.
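A sketch of that replacement (sigma follows the suggestion above; erf is the built-in error function):
sigma = 0.8/fs;                                  % about one sample period
Hsmooth = @(x) 0.5*(1 + erf(x/(sigma*sqrt(2)))); % Gaussian-smoothed step
y = exp(-a*t).*Hsmooth(t);                       % nearly band-limited test signal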
I have an instrument which produces roughly sinusoidal data, but with frequency varying slightly in time. I am using MATLAB to prototype some code to characterize the time dependence, but I'm running into some issues.
I am generating an idealized approximation of my data, I(t) = sin(2 pi f(t) t), with f(t) variable but currently tested as linear or quadratic. I then implement a sliding Hamming window (of width w) to generate a set of Fourier transforms F[I(t), t'] corresponding to the data points in I(t), and each F[I(t), t'] is fit with a Gaussian to more precisely determine the peak location.
My current MATLAB code is:
fs = 1000; %Sample frequency (Hz)
tlim = [0,1];
t = (tlim(1)/fs:1/fs:tlim(2)-1/fs)'; %Sample domain (t)
N = numel(t);
f = @(t) 100-30*(t-0.5).^2; %Frequency function (Hz)
I = sin(2*pi*f(t).*t); %Sample function
w = 201; %window width
ww=floor(w/2); %window half-width
for i=0:2:N-w
%Take the FFT of a portion of I, multiplied by a Hamming window
II = 1/(fs*N)*abs(fft(I((1:w)+i).*hamming(w))).^2;
II = II(1:floor(numel(II)/2));
p = (0:fs/w:(fs/2-fs/w))';
%Find approximate FFT maximum
[~,maxIx] = max(II);
maxLoc = p(maxIx);
%Fit the resulting FFT with a Gaussian function
gauss = @(c,x) c(1)*exp(-(x-c(2)).^2/(2*c(3)^2));
op = optimset('Display','off');
mdl = lsqcurvefit(gauss,[max(II),maxLoc,10],p,II,[],[],op);
%Generate diagnostic plots
subplot(3,1,1);plot(p,II,p,gauss(mdl,p))
line(f(t(i+ww))*[1,1],ylim,'color','r');
subplot(3,1,2);plot(t,I);
line(t(1+i)*[1,1],ylim,'color','r');line(t(w+i)*[1,1],ylim,'color','r')
subplot(3,1,3);plot(t(i+ww),f(t(i+ww)),'b.',t(i+ww),mdl(2),'r.');
hold on
xlim([0,max(t)])
drawnow
end
hold off
My thought process is that the peak location in each F[I(t), t'] should be a close approximation of the frequency at the center of the window which was used to produce it. However, this does not seem to be the case, experimentally.
I have had some success using discrete Fourier analysis for engineering problems in the past, but I've only done coursework on continuous Fourier transforms--so there may be something obvious that I'm missing. Also, this is my first question on StackExchange, so constructive criticism is welcome.
So it turns out that my problem was a poor understanding of the mathematics of the sine function. I had assumed that the frequency of the wave was equal to whatever was multiplied by the time variable (e.g. the f in sin(ft)). However, it turns out that the frequency is actually defined by the derivative of the entire argument of the sine function--the rate of change of the phase.
For constant f the two definitions are equal, since d(ft)/dt = f. But for, say, f(t) = sin(t):
d(f(t)t)/dt = d(sin(t) t)/dt = t cos(t) + sin(t)
The frequency varies as a function very different from f(t). Changing the function definition to the following fixed my problem:
f = @(t) 100-30*(t-0.5).^2; %Frequency function (Hz)
G = cumsum(f(t))/fs; %Phase function (cycles)
I = sin(2*pi*G); %Sampling function
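As a quick sanity check (a sketch): differencing the phase recovers the frequency, since diff nearly undoes cumsum:
f_inst = diff(G)*fs;           % discrete d/dt of the phase, in Hz
max(abs(f_inst - f(t(2:end)))) % tiny (rounding error only): the instantaneous frequency is f(t)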
My approach
fun = @(y) (1/sqrt(pi))*exp(-(y-1).^2).*log(1 + exp(-4*y))
integral(fun,-Inf,Inf)
This gives NaN.
So I tried plotting it.
y= -10:0.1:10;
plot(y,exp(-(y-1).^2).*log(1 + exp(-4*y)))
Then I understood that the significant part of the domain is from -4 to +4.
So I changed the limits to
integral(fun,-10,10)
However, I do not want to always have to plot the graph to find its limits. So is there any way to compute the integral directly from -Inf to Inf?
Discussion
If your integrals are always of the form $\int_{-\infty}^{\infty} e^{-(y-c)^2} f(y)\,dy$, I would use a high-order Gauss-Hermite quadrature rule.
It's similar to the Gauss-Legendre-Kronrod rule that forms the basis for quadgk but is specifically tailored for integrals over the real line with a standard Gaussian multiplier.
Rewriting your equation with the substitution $x = y - 1$, we get $\frac{1}{\sqrt{\pi}}\int_{-\infty}^{\infty} e^{-x^2}\log\left(1 + e^{-4(x+1)}\right)dx$.
The integral can then be computed using the Gauss-Hermite rule of arbitrary order (within reason):
>> order = 10;
>> [nodes,weights] = GaussHermiteRule(order);
>> f = @(x) log(1 + exp(-4*(x+1)))/sqrt(pi);
>> sum(f(nodes).*weights)
ans =
0.1933
I'd note that the function below builds a full order x order matrix to compute nodes, so it shouldn't be made too large.
There is a way to avoid this by explicitly computing the weights, but I decided to be lazy.
Besides, even at order 100, the Gaussian multiplier is about 2E-98, so the integrand's contribution is extremely minimal.
And while this isn't inherently adaptive, a high-order rule should be sufficient in most cases ... I hope.
Code
function [nodes,weights] = GaussHermiteRule(n)
% ------------------------------------------------------------------------------
% Find the nodes and weights for a Gauss-Hermite Quadrature integration.
%
if (n < 1)
error('There is no Gauss-Hermite rule of order 0.');
elseif (n < 0) || (abs(n - round(n)) > eps())
error('Given order ''n'' must be a strictly positive integer.');
else
n = round(n);
end
% Get the nodes and weights from the Golub-Welsch function
n = (0:n)' ;
b = n*0 ;
a = b + 0.5 ;
c = n ;
[nodes,weights] = GolubWelsch(a,b,c,sqrt(pi));
end
function [xk,wk] = GolubWelsch(ak,bk,ck,mu0)
%GolubWelsch
% Calculate the approximate* nodes and weights (normalized to 1) of an orthogonal
% polynomial family defined by a three-term recurrence relation of the form
% x p_k(x) = a_k p_(k+1)(x) + b_k p_k(x) + c_k p_(k-1)(x)
%
% The weight scale factor mu0 is the integral of the weight function over the
% orthogonal domain.
%
% Calculate the terms for the orthonormal version of the polynomials
alpha = sqrt(ak(1:end-1) .* ck(2:end));
% Build the symmetric tridiagonal matrix
T = full(spdiags([[alpha;0],bk,[0;alpha]],[-1,0,+1],length(alpha),length(alpha)));
% Calculate the eigenvectors and values of the matrix
[V,xk] = eig(T,'vector');
% Calculate the weights from the eigenvectors - technically, Golub-Welsch requires
% a normalization, but since MATLAB returns unit eigenvectors, it is omitted.
wk = mu0*(V(1,:).^2)';
end
I've had success with transforming such infinite-bounded integrals using a numerical variable transformation, as explained in Numerical Recipes 3e, section 4.5.3. Basically, you substitute in y=c*tan(t)+b and then numerically integrate over t in (-pi/2,pi/2), which sweeps y from -infinity to infinity. You can tune the values of c and b to optimize the process. This approach largely dodges the question of trying to determine cutoffs in the domain, but for this to work reliably using quadrature you have to know that the integrand does not have features far from y=b.
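A minimal sketch of that transformation applied to the integrand from the question (b and c are tuning parameters chosen by eye; the log term is rewritten in a numerically stable form so the integrand doesn't produce Inf*0 = NaN for very negative y, which is what broke the original integral call):
softplus = @(z) max(z,0) + log1p(exp(-abs(z))); % stable log(1+exp(z))
fun = @(y) (1/sqrt(pi))*exp(-(y-1).^2).*softplus(-4*y);
b = 1; c = 1;                             % center and scale of the substitution
% With y = c*tan(t) + b, dy = c*sec(t)^2 dt, and t runs over (-pi/2, pi/2)
g = @(t) fun(c*tan(t) + b).*c.*sec(t).^2;
integral(g, -pi/2, pi/2)                  % roughly 0.1933, matching the Gauss-Hermite result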
A quick and dirty solution would be to look for a position where your function is sufficiently small, and then take that as the limits. This assumes that for x>0 the function fun decreases monotonically and that fun(x) is roughly the same size as fun(-x) for all x.
%// A small number
epsilon = eps;
%// Stepsize for searching bound
stepTest = 1;
%// Starting position for searching bound
position = 0;
%// Not yet small enough
smallEnough = false;
%// Search bound
while ~smallEnough
smallEnough = (fun(position) < epsilon);
position = position + stepTest;
end
%// Calculate integral
integral(fun, -position, position)
If you were happy with plotting the function and deciding by eye where to cut it, then this code should suffice, I guess.
I have a set of ODEs written in matrix form as $X' = AX$; I also have a desired value of the states, $X_{des}$. $X$ is a five-dimensional vector. I want to stop the integration after all the states reach their desired values (or at least get within $10^{-3}$ of them). How do I use the events function in MATLAB to do this? (All the help I have seen is about one-dimensional states.)
PS: I know for sure that all the states approach their desired values after a long time. I just want to stop the integration once they are within $10^{-3}$ of the desired values.
First, I presume that you're aware that you can use the matrix exponential (expm in Matlab) to solve your system of linear differential equations directly.
There are many ways to accomplish what you're trying to do. They all depend a bit on your system, how it behaves, and the particular event you want to capture. Here's a small example for a 2-by-2 system of linear differential equations:
function multipleeventsdemo
A = [-1 1;1 -2]; % Example A matrix
tspan = [0 50]; % Initial and final time
x0 = [1;1]; % Initial conditions
f = @(t,y)A*y; % ODE function
thresh = 0; % Threshold value
tol = 1e-3; % Tolerance on threshold
opts = odeset('Events',@(t,y)events(t,y,thresh,tol)); % Create events function
[t,y] = ode45(f,tspan,x0,opts); % Integrate with options
figure;
plot(t,y);
function [value,isterminal,direction] = events(t,y,thresh,tol)
value = y-thresh-tol;
isterminal = all(y-thresh-tol<=0)+zeros(size(y)); % Change termination condition
direction = -1;
Integration is stopped when both states are within tol of thresh. This is accomplished by adjusting the isterminal output of the events function. Note that separate tolerance and threshold variables aren't really necessary – you simply need to define the crossing value.
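For the original question's setup, with a nonzero desired state, one variant (a sketch; events_des is a hypothetical name and X_des is the desired state vector from the question) is to use the largest deviation as a single scalar event:
function [value,isterminal,direction] = events_des(t,y,X_des,tol)
value = max(abs(y - X_des)) - tol; % positive until every state is within tol
isterminal = 1;                    % stop the integration at the crossing
direction = -1;                    % trigger only while the deviation is decreasing
It would be registered the same way: opts = odeset('Events',@(t,y)events_des(t,y,X_des,tol));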
If your system oscillates as it approaches its steady state (if A has complex eigenvalues), then you'll need to do more work. But your question doesn't indicate this. And again, numerical integration may not be the easiest/best way to solve your problem with such a system. Here is how you could use expm in conjunction with a bit of symbolic math:
A = [-1 1;1 -2];
x0 = [1;1];
tol = 1e-3;
syms t_sym
y = simplify(expm(A*t_sym)*x0) % Y as a function of t
t0 = NaN(1,length(x0));
for i = 1:length(x0)
sol = double(solve(y(i)==tol,t_sym)) % Solve for t when y(i) equal to tol
if ~isempty(sol) % Could be no solution, then NaN
t0(i) = max(sol); % Or more than one solution, take largest
end
end
f = matlabFunction(y); % Create vectorized function of t
t_vec = linspace(0,max(t0),1e2); % Time vector
figure;
plot(t_vec,f(t_vec));
This will only work for fairly small A, however, because of the symbolic math. Numerical approaches using expm are also possible and likely more scalable.
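For example, a purely numeric variant (a sketch that steps expm over a time grid instead of using symbolic math; the grid step dt is an assumption):
A = [-1 1;1 -2];
x0 = [1;1];
tol = 1e-3;
dt = 0.1;               % time grid resolution (assumption)
t = 0; x = x0;
while any(abs(x) > tol) % this A is stable, so the states decay toward zero
    t = t + dt;
    x = expm(A*t)*x0;   % exact solution of x' = Ax at time t
end
t                       % first grid time at which all states are within tol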