I am trying to reconstruct an image from the projections produced by a neutron image scanner, using the code below, but I am not able to obtain a meaningful reconstructed image.
Can anybody advise me on where I am going wrong?
Much appreciated,
Vani
% (assumed loop header, missing from the pasted snippet; srcFiles presumably comes from dir() on the same folder)
for i = 1:length(srcFiles)
filename = strcat(' Z:\NIST_Data\2016\SEPT\example reconstructed\carboxylic\carboxylic reconstructed part 3\Coral\',srcFiles(i).name);
I=imread(filename);
P = im2double(I);
if i == 1
array3d = P;
else
array3d = cat(3, array3d, P);
end
end
num = size(array3d,3);
for p = 1:num
PR = double(squeeze(array3d(p,:,:)));
[L,C]=size(PR);
w = [-pi : (2*pi)/L : pi-(2*pi)/L];
Filt = abs(sin(w));
Filt = Filt(1:463);
for i = 1:C,
IMG = fft(PR(:,i));
end
FiltIMG = IMG*Filt; %FiltIMG = filter (b, a, IMG);
% Remove any remaining imaginary parts
FIL = real(FiltIMG);
% filter the projections
%filtPR = projfilter(PR);
%filtPR = filterplus(PR);
filtPR = FIL;
THETA=0:180;
% figure out how big our picture is going to be.
n = size(filtPR,1);
sideSize = n;
% convert THETA to radians
th = (pi/180)*THETA;
% set up the image
m = length(THETA);
BPI = zeros(sideSize,sideSize);
% find the middle index of the projections
midindex = (n+1)/2;
% set up x and y matrices
x = 1:sideSize;
y = 1:sideSize;
[X,Y] = meshgrid(x,y);
xpr = X - (sideSize+1)/2;
ypr = Y - (sideSize+1)/2;
% loop over each projection
%figure
%colormap(jet)
%M = moviein(m);
for i = 1:m
tic
disp(['On angle ', num2str(THETA(i))]);
% figure out which projections to add to which spots
filtIndex = round(midindex + xpr*sin(th(i)) - ypr*cos(th(i)));
% if we are "in bounds" then add the point
BPIa = zeros(sideSize,sideSize);
spota = find((filtIndex > 0) & (filtIndex <= n));
newfiltIndex = filtIndex(spota);
BPIa(spota) = filtPR(newfiltIndex(:),i);
%keyboard
BPI = BPI + BPIa;
toc
%imagesc(BPI)
%M(:,i) = getframe;
%figure(2)
%plot(filtPR(:,i));
%keyboard
end
BPI = BPI./m;
h=figure
imagesc(BPI)
saveas(h,sprintf('filtsli-FIG%d.tif',p));
end
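Two things may be worth checking. First, the images are concatenated along the third dimension with cat(3, ...), yet the loop extracts slices with array3d(p,:,:); array3d(:,:,p) may be what was intended. Second, for comparison, here is a minimal sketch of the same reconstruction using MATLAB's built-in iradon (Image Processing Toolbox); it assumes each page of array3d is a sinogram with one projection per column and that the projections cover 0 to 179 degrees in 1-degree steps, which may need adapting to your data:
theta = 0:179;                                   % assumed angular sampling
for p = 1:size(array3d,3)
    sino = array3d(:,:,p);                       % detector bins x angles (assumption)
    recon = iradon(sino, theta, 'linear', 'Ram-Lak');
    h = figure; imagesc(recon); axis image; colormap gray;
    saveas(h, sprintf('iradon-slice-%d.tif', p));
end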
I'm writing code in Matlab to find interest points in an image using DoG (difference of Gaussians).
Here is the main.m:
imTest1 = rgb2gray(imread('1.jpg'));
imTest1 = double(imTest1);
sigma = 0.6;
k = 5;
thresh = 3;
[x1,y1,r1] = DoG(k,sigma,thresh,imTest1);
%get the interest points and show it on the image with its scale
figure(1);
imshow(imTest1,[]), hold on, scatter(y1,x1,r1,'r');
And the function DoG is:
function [x,y,r] = DoG(k,sigma,thresh,imTest)
x = []; y = []; r = [];
%suppose 5 levels of gaussian blur
for i = 1:k
g{i} = fspecial('gaussian',size(imTest),i*sigma);
end
%so 4 levels of DoG
for i = 1:k-1
d{i} = imfilter(imTest,g{i+1}-g{i});
end
%compare the current pixel to its 26 neighbours in the 3x3x3 block across adjacent DoG levels; if it is the maximum/minimum, this pixel will be an interest point
for i = 2:k-2
for m = 2:size(imTest,1)-1
for n = 2:size(imTest,2)-1
id = 1;
compare = zeros(1,27);
for ii = i-1:i+1
for mm = m-1:m+1
for nn = n-1:n+1
compare(id) = d{ii}(mm,nn);
id = id+1;
end
end
end
compare_max = max(compare);
compare_min = min(compare);
if (compare_max == d{i}(m,n) || compare_min == d{i}(m,n))
if (compare_min < -thresh || compare_max > thresh)
x = [x;m];
y = [y;n];
r = [r;abs(d{i}(m,n))];
end
end
end
end
end
end
So there's a Gaussian function, and the sigma I set is 0.6. After running the code, I find that the positions are not correct and the scales look almost the same for all interest points. I think my code should work, but the result is not right. Does anybody know what the problem is?
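For reference, a common way to build a DoG stack is to grow sigma geometrically (sigma, k*sigma, k^2*sigma, ...) and use a compact kernel sized to each sigma; with i*sigma steps and an image-sized kernel, adjacent levels differ very little, which would also make the reported scales nearly identical. A minimal sketch under those assumptions (the factor 1.6 and the kernel-size rule are illustrative choices, not taken from the code above):
kFactor = 1.6;                              % assumed ratio between successive scales
nLevels = 5;
g = cell(1, nLevels);
d = cell(1, nLevels-1);
for i = 1:nLevels
    s = sigma * kFactor^(i-1);              % sigma, k*sigma, k^2*sigma, ...
    hsize = 2*ceil(3*s) + 1;                % kernel just wide enough for this sigma
    g{i} = fspecial('gaussian', hsize, s);
end
for i = 1:nLevels-1
    % blur first, then subtract, so kernels of different sizes can be combined
    d{i} = imfilter(imTest1, g{i+1}, 'replicate') - imfilter(imTest1, g{i}, 'replicate');
end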
I am using the contourf function with a binary image. I am having trouble working out how to get the area and centroid of the different regions in the image; I need this in order to classify the objects.
You need to use the contour matrix output C.
Here is an example:
function data = ContourInfo(C)
data = [];
if isempty(C)
return
end
k = 1;
j = 1;
while j < size(C,2)
data(k).numxy = C(2,j);
data(k).x = C(1,j+1:j+data(k).numxy);
data(k).y = C(2,j+1:j+data(k).numxy);
data(k).level = C(1,j);
[data(k).centroid(1), data(k).centroid(2), data(k).area] = ...
polycentroid(data(k).x, data(k).y);
data(k).area = polyarea(data(k).x, data(k).y); % unsigned area
j = j + data(k).numxy + 1;
k = k+1;
end
function [x0,y0,a] = polycentroid(x,y)
[m1,n1] = size(x); [m2,n2] = size(y);
n = max(m1,n1);
x = x(:); y = y(:);
x2 = [x(2:n);x(1)];
y2 = [y(2:n);y(1)];
a = 1/2*sum (x.*y2-x2.*y);
x0 = 1/6*sum((x.*y2-x2.*y).*(x+x2))/a;
y0 = 1/6*sum((x.*y2-x2.*y).*(y+y2))/a;
Call it as follows:
Z = peaks(20);
[C, h] = contourf(Z,10);
contourData = ContourInfo(C)
disp('Area of contour 1:');
disp(contourData(1).area);
disp('Centroid of contour 1:');
disp(contourData(1).centroid);
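Since the original question starts from a binary image, another option (a sketch, assuming the Image Processing Toolbox is available and bw is a logical image with the objects set to true) is to skip the contour matrix and use regionprops directly:
stats = regionprops(bw, 'Area', 'Centroid');        % one struct per connected object
for k = 1:numel(stats)
    fprintf('Object %d: area = %g, centroid = (%.1f, %.1f)\n', ...
        k, stats(k).Area, stats(k).Centroid(1), stats(k).Centroid(2));
end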
I have an image which I converted to binary, and I got all the black pixels' coordinates.
The 'matrix' variable contains the x and y coordinates arranged by columns.
Now I need to make a simulation to see whether my program works.
I have to generate a matrix image from my results.
im=imread('square.jpg');
imshow(im);
c = im2bw(im);
figure; imshow(c);
dim = size(c) % size of the image
x = [];
y = [];
xdif = [];
newx = [];
matrix = [];
for i = 1:dim(1)
for j = 1:dim(2)
if c(i,j)==0;
x = [x i];
y = [y j];
end
end
end
% show black pixel's coordinates
p = [x;y];
%number of pixels
nr = length(x)
dimp = size(p);
xval = p(1,:);
yval = p(2,:);
j=1;
i=1;
for z = 1:dimp(2)-1
xdif = xval(z+1)-xval(z);
ff=find(xdif > 0);
if ff == 1
i = 1;
else
i=i+1;
end
newx(i,j)= xval(z);
newy(i,j)= yval(z);
if ff == 1
j= j+1;
end
end
xsize = size(newx);
ysize = size(newy);
matrix_size = xsize(2)+ysize(2)
xinc = 1;
yinc = 1;
x=1;
for ct = 1:1:matrix_size/2
x;
matrix(:,x) = newx(:,xinc);
matrix(:,x+1) = newy(:,yinc);
matrix;
xinc = xinc+1;
yinc = yinc+1;
x=x+3;
end
matrix
This is my program; now I need to make the simulation by generating an image from my coordinates.
How can I do that?
Thanks
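One way to do the simulation is to rebuild a binary image from the coordinate lists and compare it with the original. A minimal sketch, assuming x holds the row indices and y the column indices of the black pixels, as collected in the loop above:
sim = true(dim);                  % start from an all-white image of the original size
idx = sub2ind(dim, x, y);         % convert (row, column) pairs to linear indices
sim(idx) = false;                 % paint the recorded pixels black again
figure; imshow(sim); title('rebuilt from coordinates');
isequal(sim, c)                   % should display 1 if the rebuild matches the input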
I have implemented a perspective projection algorithm according to chapter 6 of Computer Graphics: Principles and Practice (CGP&P) by Foley, van Dam, Feiner, and Hughes (2nd edition). I have
N'per = M * Sper * SHpar * T(-prp) * R * T(-vrp).
As I understand it, the final image should be in canonical form, sized (-1,-1) to (1,1) with z in (0,-1). However, the final image X-Y dimensions (see Figure 1) do not seem correct. I'm mostly trying to understand how the final image size is determined. I have included the Matlab code below. My frustum (f) is defined by an eyepoint (EP) at a specified lat/lon that has been converted to ECEF; distances: near plane (nDist) = 300; view plane (vDist) = 900; far plane (fDist) = 25000. A line of sight (LOS) vector created at the EP is the center of projection. The frustum correctly finds and returns the buildings that are within it along the LOS. The field of view is 10 deg x 10 deg. Now I'm just trying to project those buildings onto a defined window so that I can "quantize" (paint?) the grid and indicate which building is located at which x,y pair in the view plane. Unfortunately, because the window does not come out at the indicated size, the painting is more difficult for me. And besides, I'd just like to know what I'm doing wrong to not end up with the correct dimensions.
Matlab code (no attempt at optimization or anything, just a brute-force implementation):
function iPersProj = getPersProj(bldg, bi, f, plotpersp, fPersPlot)
color = [rand rand rand];
face = eFaces.bottom;
iPersProjBtm = persproj(f, bldg, face);
face = eFaces.top;
iPersProjTop = persproj(f, bldg, face);
iPersProj = [iPersProjTop;iPersProjBtm];
hold on;
scatter3(iPersProjTop(:,1), ...
iPersProjTop(:,2), ...
iPersProjTop(:,3),'+','CData',color);
scatter3(iPersProjBtm(:,1), ...
iPersProjBtm(:,2), ...
iPersProjBtm(:,3),'o','CData',color);
pPersProj=[iPersProjTop;
iPersProjTop(1,:); ...
iPersProjBtm; ...
iPersProjBtm(1,:); ...
iPersProjBtm(2,:); ...
iPersProjTop(4,:); ...
iPersProjTop(3,:); ...
iPersProjBtm(3,:); ...
iPersProjBtm(4,:); ...
iPersProjTop(2,:); ...
iPersProjTop(1,:)];
line (pPersProj(:,1), pPersProj(:,2),'Color',color);
text (pPersProj(1,1), pPersProj(1,2), int2str(bi));
end
function proj = persproj(f, bldg, face)
vrp = f.vC; %center view plane
vpn = f.Z; % LOS for frustum
cop = -f.EP;
F = f.vDist - f.nDist;
B = f.vDist - f.fDist;
umin = -5;
vmin = -5;
umax = 5;
vmax = 5;
R = getrotation (f);
Tvrp = gettranslation(-vrp);
ed = R * Tvrp * [f.EP 1]'; %translate eyepoint to camera?
prp = [0 0 ed(3)];
sh = getsh(prp, umax, umin, vmax, vmin);
Tprp = gettranslation(-prp);
vrpp = -prp(3); %(sh * Tprp * [0;0;0;1]); %vrp-prime per CGP&P
zmin = -(vrpp + F)/(vrpp+B);
zmax = -(vrpp + B)/(vrpp+B);
zprj = -vrpp/(vrpp+B);
sper = getsper(vrpp, B, umax, umin, vmax, vmin);
M=[ 1 0 0 0; ...
0 1 0 0; ...
0 0 1/(1+zmin) -zmin/(1+zmin); ...
0 0 -1 0];
proj = zeros(4,4);
for i=1:4
Q=bldg.coords(i,:,face);
uvdw = M * sper * sh * Tprp * R * Tvrp * [Q';1];
proj (i,1) = uvdw(1);
proj (i,2) = uvdw(2);
proj (i,3) = uvdw(3);
end
end
function sper = getsper (vrpz, B, umax, umin, vmax, vmin)
dx=umax-umin;
dy=vmax-vmin;
sper=zeros(4,4);
sper(1,1) = 2*vrpz/(dx*(vrpz+B));
sper(2,2) = 2*vrpz/(dy*(vrpz+B));
sper(3,3) = -1/(vrpz+B);
sper(4,4) = 1;
end
function sh = getsh (prp, umax, umin, vmax, vmin)
sx=umax+umin;
sy=vmax+vmin;
cw = [sx/2 sy/2 0 1]';
dop = cw - [prp 1]';
shx = - dop(1)/dop(3);
shy = - dop(2)/dop(3);
sh=zeros(4,4);
sh(1,1) = 1;
sh(2,2) = 1;
sh(3,3) = 1;
sh(4,4) = 1;
sh(1,3) = shx;
sh(2,3) = shy;
end
function R = getrotation (f)
rz = f.Z;
rx=cross(f.Y, rz);
rx=rx/norm(rx);
ry=cross(rz,rx);
R=zeros(4,4);
R(1,1:3) = rx;
R(2,1:3) = ry;
R(3,1:3) = rz;
R(4,4) = 1;
end
function T = gettranslation(p)
T = zeros(4,4);
T(1:3,4) = p';
T(1,1) = 1;
T(2,2) = 1;
T(3,3) = 1;
T(4,4) = 1;
end
Figure 1: Perspective projection, but the dimensions are not (-1,-1) to (1,1).
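One thing worth checking (an observation based on the CGP&P pipeline, not a confirmed diagnosis of this particular code): after multiplying by M the result is still in homogeneous coordinates, and M's last row maps w to -z, so a divide by the w component is needed before x and y land inside the canonical (-1,-1) to (1,1) window. In persproj that would look something like:
uvdw = M * sper * sh * Tprp * R * Tvrp * [Q'; 1];
uvdw = uvdw / uvdw(4);   % homogeneous divide; without it the x-y extent is not canonical
proj(i,1) = uvdw(1);
proj(i,2) = uvdw(2);
proj(i,3) = uvdw(3);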