I am new to image processing in Matlab, and I am trying to segment the lungs and nodules from a CT image. I have done the initial image enhancement.
I searched a lot on this topic but haven't found any relevant material.
I am trying to segment the lung region from the given image, and then detect nodules within it.
The code I tried in Matlab:
d1 = dicomread('000000.dcm');   % read the CT slice
d1ca = imadjust(d1);            % contrast adjustment
d1nF = wiener2(d1ca);           % adaptive noise filtering
d1Level = graythresh(d1nF);     % Otsu's global threshold
d1sBW = im2bw(d1nF, d1Level);   % binarize
sed = strel('diamond', 1);      % diamond-shaped structuring element
BWfinal = imerode(d1sBW, sed);
BWfinal = imerode(BWfinal, sed);
BWoutline = bwperim(BWfinal);
Segout = d1nF;
Segout(BWoutline) = 0;          % overlay the perimeter on the filtered image
edgePrewitt = edge(d1nF, 'prewitt');
Result of the above code:
I want results like these:
http://oi41.tinypic.com/35me7pj.jpg
http://oi42.tinypic.com/2jbtk6p.jpg
http://oi44.tinypic.com/w0kthe.jpg
http://oi40.tinypic.com/nmfaio.jpg
http://oi41.tinypic.com/2nvdrie.jpg
http://oi43.tinypic.com/2nvdnhk.jpg
I know it may be easy for experts. Please help me out.
Thank you!
The following is not a Matlab answer! However, OpenCV and Matlab share many features, and I'm sure you will be able to translate this C++ code to Matlab with no problems.
For more information about the methods being called, check the OpenCV documentation.
#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
int main(int argc, char* argv[])
{
// Load input image (colored, i.e. 3-channel)
cv::Mat input = cv::imread(argv[1]);
if (input.empty())
{
std::cout << "!!! failed imread()" << std::endl;
return -1;
}
// Convert input image to grayscale (1-channel)
cv::Mat grayscale = input.clone();
cv::cvtColor(input, grayscale, cv::COLOR_BGR2GRAY);
What grayscale looks like:
// Erode & Dilate to remove noises and improve the result of the next operation (threshold)
int erosion_type = cv::MORPH_RECT; // MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE
int erosion_size = 3;
cv::Mat element = cv::getStructuringElement(erosion_type,
cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1),
cv::Point(erosion_size, erosion_size));
cv::erode(grayscale, grayscale, element);
cv::dilate(grayscale, grayscale, element);
What grayscale looks like after morphological operations:
// Threshold to segment the area of the lungs
cv::Mat thres;
cv::threshold(grayscale, thres, 80, 150, cv::THRESH_BINARY);
What thres looks like:
// Find the contours of the lungs in the thresholded image
std::vector<std::vector<cv::Point> > contours;
cv::findContours(thres, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
// Fill the areas of the lungs with BLUE for better visualization
cv::Mat lungs = input.clone();
for (size_t i = 0; i < contours.size(); i++)
{
std::vector<cv::Point> cnt = contours[i];
double area = cv::contourArea(cv::Mat(cnt));
if (area > 15000 && area < 35000)
{
std::cout << "* Area: " << area << std::endl;
cv::drawContours(lungs, contours, (int)i, cv::Scalar(255, 0, 0),
cv::FILLED, cv::LINE_8, std::vector<cv::Vec4i>(), 0, cv::Point());
}
}
What lungs looks like:
// Using the image with blue lungs as a mask, we create a new image containing only the lungs
cv::Mat blue_mask = cv::Mat::zeros(input.size(), CV_8UC1);
cv::inRange(lungs, cv::Scalar(255, 0, 0), cv::Scalar(255, 0, 0), blue_mask);
cv::Mat output;
input.copyTo(output, blue_mask);
What output looks like:
At this point you have the lungs isolated in the image and can proceed to execute other filter operations to isolate the nodules.
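As a starting point for that nodule stage, here is a hedged sketch (not part of the original pipeline): it thresholds bright spots inside the isolated lungs and keeps only small blobs. The threshold value (200) and the area limits (20 to 500 pixels) are assumptions you would need to tune on your data.

// Hypothetical follow-up: find bright nodule candidates inside the lungs.
// The threshold and area range below are assumed values, not validated ones.
cv::Mat lungs_gray;
cv::cvtColor(output, lungs_gray, cv::COLOR_BGR2GRAY);

cv::Mat candidates;
cv::threshold(lungs_gray, candidates, 200, 255, cv::THRESH_BINARY);

std::vector<std::vector<cv::Point> > nodule_contours;
cv::findContours(candidates, nodule_contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

for (size_t i = 0; i < nodule_contours.size(); i++)
{
// Keep only blobs in a plausible nodule size range (assumed limits).
double area = cv::contourArea(nodule_contours[i]);
if (area > 20 && area < 500)
cv::drawContours(output, nodule_contours, (int)i, cv::Scalar(0, 0, 255), 2);
}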
Good luck.
Try this:
% dp6BK.png is your original image; I downloaded it directly
I = im2double(imread('dp6BK.png'));
I = I(:,:,1);
imshow(I)
cropped = I(50:430, 8:500);               %% Crop region of interest
thresholded = cropped < 0.35;             %% Threshold to isolate lungs
clearThresh = imclearborder(thresholded); %% Remove border artifacts in image
lungs = bwareaopen(clearThresh, 100);     %% Remove objects smaller than 100 pixels
lungsFilled = imfill(lungs, 'holes');     %% Fill in the vessels inside the lungs
figure, imshow(lungsFilled .* cropped)
What you will get:
Related
In CImg, I have split an RGBA image apart into multiple single-channel images, with code like:
CImg<unsigned char> input("foo.png");
CImg<unsigned char> r = input.get_channel(0), g = input.get_channel(1), b = input.get_channel(2), a = input.get_channel(3);
Then I try to swizzle the channel order:
CImg<unsigned char> output(input.width(), input.height(), 1, input.channels());
output.channel(0) = g;
output.channel(1) = b;
output.channel(2) = r;
output.channel(3) = a;
When I save the image out, however, it turns out grayscale, apparently based on the alpha channel value; for example, this input:
becomes this output:
How do I specify the image color format so that CImg saves into the correct color space?
Assigning to channel() does not copy the pixel data the way you expect; a better approach is to copy each channel's pixels explicitly with std::copy:
std::copy(g.begin(), g.end(), &output.atX(0, 0, 0, 0));
std::copy(b.begin(), b.end(), &output.atX(0, 0, 0, 1));
std::copy(r.begin(), r.end(), &output.atX(0, 0, 0, 2));
std::copy(a.begin(), a.end(), &output.atX(0, 0, 0, 3));
This results in an output image like:
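For reference, here is a minimal self-contained sketch of the whole swizzle. It assumes a 4-channel RGBA input named foo.png and relies on CImg's planar storage, where each channel is a contiguous block starting at data(0,0,0,c).

#include <algorithm>
#include "CImg.h"
using namespace cimg_library;

int main()
{
CImg<unsigned char> input("foo.png"); // assumed 4-channel RGBA
CImg<unsigned char> output(input.width(), input.height(), 1, 4);

// Each channel occupies width*height contiguous bytes.
const long plane = (long)input.width() * input.height();
std::copy(input.data(0,0,0,1), input.data(0,0,0,1) + plane, output.data(0,0,0,0)); // G -> ch 0
std::copy(input.data(0,0,0,2), input.data(0,0,0,2) + plane, output.data(0,0,0,1)); // B -> ch 1
std::copy(input.data(0,0,0,0), input.data(0,0,0,0) + plane, output.data(0,0,0,2)); // R -> ch 2
std::copy(input.data(0,0,0,3), input.data(0,0,0,3) + plane, output.data(0,0,0,3)); // A -> ch 3

output.save("swizzled.png");
return 0;
}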
My end goal is to calculate the moments of all connected regions in an image.
Problem: the way regions are stored differs between OpenCV and Matlab, and it is clear that moments of regions and moments of contours differ too.
So to reproduce the result of extracting regions in Matlab:
test = imread('test.bmp');
l = bwlabel(test, 8);
the following OpenCV code is necessary:
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
Mat test = imread("..\\test.bmp", CV_LOAD_IMAGE_UNCHANGED);
findContours(test, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
for (int i = 0; i < (int)contours.size(); i++)
{
if (hierarchy[i][3] > -1)
continue;
Mat imProcessing = Mat::zeros(test.size(), test.type());
drawContours(imProcessing, contours, i, Scalar(255), CV_FILLED, 8, hierarchy, 1);
// now imProcessing contains the connected region, not just the contour!
}
Is there a more efficient way?
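If you are on OpenCV 3.x or later, a possibly more direct analogue of bwlabel is cv::connectedComponents, which labels all regions in one pass. A hedged sketch (untested against your data; masking per label is one simple way to feed each region to cv::moments):

// Sketch: label connected regions directly and take moments of each one,
// avoiding the contour round-trip (requires OpenCV >= 3.0).
cv::Mat test = cv::imread("test.bmp", cv::IMREAD_GRAYSCALE);
cv::Mat labels;
int n = cv::connectedComponents(test, labels, 8, CV_32S);

for (int label = 1; label < n; label++) // label 0 is the background
{
cv::Mat region = (labels == label);        // 8-bit mask of one region
cv::Moments m = cv::moments(region, true); // binary-image moments
// use m.m00, m.m10, m.m01, ... as needed
}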
I am new to OpenCV. I have already detected the edges of a paper sheet, but my result image is blurred after drawing lines on the edges. How can I draw lines on the edges of the paper sheet so that the image quality remains unaffected?
What am I missing?
My code is below.
Many thanks.
-(void)forOpenCV
{
if( imageView.image != nil )
{
cv::Mat greyMat=[self cvMatFromUIImage:imageView.image];
vector<vector<cv::Point> > squares;
cv::Mat img = [self debugSquares:squares :greyMat];
imageView.image =[self UIImageFromCVMat: img];
}
}
- (cv::Mat) debugSquares: (std::vector<std::vector<cv::Point> >) squares : (cv::Mat &)image
{
NSLog(@"%lu", squares.size());
// blur will enhance edge detection
Mat blurred(image);
medianBlur(image, blurred, 9);
Mat gray0(image.size(), CV_8U), gray;
vector<vector<cv::Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&image, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), cv::Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
NSLog(@"%lu", squares.size());
for( size_t i = 0; i < squares.size(); i++ )
{
cv:: Rect rectangle = boundingRect(Mat(squares[i]));
if(i==squares.size()-1)////Detecting Rectangle here
{
const cv::Point* p = &squares[i][0];
int n = (int)squares[i].size();
NSLog(@"%d", n);
line(image, cv::Point(507,418), cv::Point(507+1776,418+1372), Scalar(255,0,0),2,8);
polylines(image, &p, &n, 1, true, Scalar(255,255,0), 5, CV_AA);
fx1=rectangle.x;
fy1=rectangle.y;
fx2=rectangle.x+rectangle.width;
fy2=rectangle.y+rectangle.height;
line(image, cv::Point(fx1,fy1), cv::Point(fx2,fy2), Scalar(0,0,255),2,8);
}
}
return image;
}
Instead of
Mat blurred(image);
you need to do
Mat blurred = image.clone();
Because the first line does not copy the image data; it just creates a second header pointing to the same data.
When you blur the image, you are therefore also changing the original.
What you need to do instead is create a real copy of the actual data and operate on that copy.
The OpenCV reference states:
by using a copy constructor or assignment operator, where on the right side it can
be a matrix or expression, see below. Again, as noted in the introduction, matrix assignment is O(1) operation because it only copies the header and increases the reference counter.
Mat::clone() method can be used to get a full (a.k.a. deep) copy of the matrix when you need it.
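A minimal sketch illustrating the difference:

cv::Mat a = cv::imread("input.png");
cv::Mat shallow(a);       // shares pixel data with 'a'
cv::Mat deep = a.clone(); // owns an independent copy of the pixels

cv::medianBlur(shallow, shallow, 9); // this also blurs 'a'!
cv::medianBlur(deep, deep, 9);       // 'a' is left untouched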
The first problem is easily solved by doing the entire processing on a copy of the original image. That way, after you get all the points of the square you can draw the lines on the original image and it will not be blurred.
The second problem, which is cropping, can be solved by defining a ROI (region of interested) in the original image and then copying it to a new Mat. I've demonstrated that in this answer:
// Setup a Region Of Interest
cv::Rect roi;
roi.x = 50;
roi.y = 10;
roi.width = 400;
roi.height = 450;
// Crop the original image to the area defined by ROI
cv::Mat crop = original_image(roi);
cv::imwrite("cropped.png", crop);
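Note that original_image(roi) only creates a new header over the shared pixel data; if you need a copy that is independent of the original, use cv::Mat crop = original_image(roi).clone(); instead.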
I have an image in Matlab:
img = imread('image.jpg');
which returns a uint8 array of height x width x channels (3 channels: RGB).
Now I want to use openCV to do some manipulations on it, so I write up a MEX file which takes the image as a parameter and constructs an IplImage from it:
#include "mex.h"
#include "cv.h"
void mexFunction(int nlhs, mxArray **plhs, int nrhs, const mxArray **prhs) {
char *matlabImage = (char *)mxGetData(prhs[0]);
const mwSize *dim = mxGetDimensions(prhs[0]);
CvSize size;
size.height = dim[0];
size.width = dim[1];
IplImage *iplImage = cvCreateImageHeader(size, IPL_DEPTH_8U, dim[2]);
iplImage->imageData = matlabImage;
iplImage->imageDataOrigin = iplImage->imageData;
/* Show the openCV image */
cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
cvShowImage("mainWin", iplImage);
}
The result looks completely wrong, because OpenCV uses different conventions than Matlab for storing an image (for instance, it interleaves the color channels).
Can anyone explain what the differences in conventions are and give some pointers on how to display the image correctly?
After spending the day doing fun image format conversions </sarcasm>, I can now answer my own question.
Matlab stores images as 3-dimensional arrays: height × width × color.
OpenCV stores images as 2-dimensional arrays with interleaved channels: (color × width) × height.
Furthermore, for best performance, OpenCV pads the images with zeros so rows are always aligned on 32 bit blocks.
I've done the conversion in Matlab:
function [cv_img, dim, depth, width_step] = convert_to_cv(img)
% Exchange rows and columns (handles 3D cases as well)
img2 = permute( img(:,end:-1:1,:), [2 1 3] );
dim = [size(img2,1), size(img2,2)];
% Convert double precision to single precision if necessary
if( isa(img2, 'double') )
img2 = single(img2);
end
% Determine image depth
if( ndims(img2) == 3 && size(img2,3) == 3 )
depth = 3;
else
depth = 1;
end
% Handle color images
if(depth == 3 )
% Switch from RGB to BGR
img2(:,:,[3 2 1]) = img2;
% Interleave the colors
img2 = reshape( permute(img2, [3 1 2]), [size(img2,1)*size(img2,3) size(img2,2)] );
end
% Pad the image
width_step = ceil(size(img2,1) / 4) * 4; % round the row length up to the next multiple of 4
img3 = uint8(zeros(width_step, size(img2,2)));
img3(1:size(img2,1), 1:size(img2,2)) = img2;
cv_img = img3;
% Output to openCV
cv_display(cv_img, dim, depth, width_step);
The code to transform this into an IplImage is in the MEX file:
#include "mex.h"
#include "cv.h"
#include "highgui.h"
#define IN_IMAGE prhs[0]
#define IN_DIMENSIONS prhs[1]
#define IN_DEPTH prhs[2]
#define IN_WIDTH_STEP prhs[3]
void mexFunction(int nlhs, mxArray **plhs, int nrhs, const mxArray **prhs) {
bool intInput = true;
if(nrhs != 4)
mexErrMsgTxt("Usage: cv_disp(image, dimensions, depth, width_step)");
if( mxIsUint8(IN_IMAGE) )
intInput = true;
else if( mxIsSingle(IN_IMAGE) )
intInput = false;
else
mexErrMsgTxt("Input should be a matrix of uint8 or single precision floats.");
if( mxGetNumberOfElements(IN_DIMENSIONS) != 2 )
mexErrMsgTxt("Dimension vector should contain two elements: [width, height].");
char *matlabImage = (char *)mxGetData(IN_IMAGE);
double *imgSize = mxGetPr(IN_DIMENSIONS);
size_t width = (size_t) imgSize[0];
size_t height = (size_t) imgSize[1];
size_t depth = (size_t) *mxGetPr(IN_DEPTH);
size_t widthStep = (size_t) *mxGetPr(IN_WIDTH_STEP) * (intInput ? sizeof(unsigned char):sizeof(float));
CvSize size;
size.height = height;
size.width = width;
IplImage *iplImage = cvCreateImageHeader(size, intInput ? IPL_DEPTH_8U:IPL_DEPTH_32F, depth);
iplImage->imageData = matlabImage;
iplImage->widthStep = widthStep;
iplImage->imageDataOrigin = iplImage->imageData;
/* Show the openCV image */
cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
cvShowImage("mainWin", iplImage);
}
You could optimize your program with mxGetDimensions and mxGetNumberOfDimensions to get the size of the image, and use mxGetClassID to determine the depth more accurately.
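For instance, a hedged sketch of those checks inside the MEX function (dims[0] and dims[1] are the row and column counts of whatever array you pass in, so mind the permutation done on the Matlab side):

// Sketch: derive size, channel count and class from the input array itself,
// instead of passing them in as extra arguments.
mwSize ndims = mxGetNumberOfDimensions(IN_IMAGE);
const mwSize *dims = mxGetDimensions(IN_IMAGE);
size_t rows = dims[0];
size_t cols = dims[1];
size_t depth = (ndims == 3) ? dims[2] : 1;

switch (mxGetClassID(IN_IMAGE)) {
case mxUINT8_CLASS:  /* use IPL_DEPTH_8U  */ break;
case mxSINGLE_CLASS: /* use IPL_DEPTH_32F */ break;
default: mexErrMsgTxt("Unsupported input class.");
}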
I wanted to do the same, but I think it would be better to do this using a Matlab DLL and calllib. I would not do the transformation of the image into OpenCV format in Matlab, because that would be slow. This is one of the biggest problems with Matlab and OpenCV interoperability. I think OpenCV 2.2 has some good solutions for this problem. It looks like there are some solutions like that done by the OpenCV community for Octave, but I still don't understand them; they somehow use the C++ functionality of OpenCV.
Try using the library developed by Kota Yamaguchi:
http://github.com/kyamagu/mexopencv
It defines a class called 'MxArray' that can perform all types of conversions from MATLAB mxArray variables to OpenCV objects (and from OpenCV to MATLAB). For example, this library can convert between mxArray and cv::Mat data types. Btw, IplImage is not relevant anymore if you use C++ API of OpenCV, it's better to use cv::Mat instead.
Note: if using the library, make sure to compile your mex function with MxArray.cpp file from the library; you can do so in MATLAB command line with:
mex yourmexfile.cpp MxArray.cpp
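A sketch of what a MEX function built on the library might look like; MxArray(prhs[0]).toMat() and the MxArray(cv::Mat) constructor follow the library's documented usage, but treat the exact calls as an assumption to verify against its README:

#include "mexopencv.hpp"

// Sketch: convert a Matlab image to cv::Mat, process it, return the result.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
cv::Mat img = MxArray(prhs[0]).toMat(); // mxArray -> cv::Mat (handles layout)
cv::GaussianBlur(img, img, cv::Size(5, 5), 0);
plhs[0] = MxArray(img);                 // cv::Mat -> mxArray
}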
Based on the answer above and on "How the image matrix is stored in the memory in OpenCV", we can do this with OpenCV Mat operations only!
C++: Mat::Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0)
C++: void merge(const Mat* mv, size_t count, OutputArray dst)
Then the mex C/C++ code is:
#include "mex.h"
#include <opencv2/opencv.hpp>
#define uint8 unsigned char
void mexFunction(int nlhs, mxArray *out[], int nrhs, const mxArray *input[])
{
// assume the type of image is uint8
if(!mxIsClass(input[0], "uint8"))
{
mexErrMsgTxt("Only image arrays of the UINT8 class are allowed.");
return;
}
uint8* rgb = (uint8*) mxGetData(input[0]);
const mwSize* dims = mxGetDimensions(input[0]);
int height = (int)dims[0];
int width = (int)dims[1];
int imsize = height * width;
cv::Mat imR(1, imsize, cv::DataType<uint8>::type, rgb);
cv::Mat imG(1, imsize, cv::DataType<uint8>::type, rgb+imsize);
cv::Mat imB(1, imsize, cv::DataType<uint8>::type, rgb+imsize + imsize);
// opencv is BGR and matlab is column-major order
cv::Mat imA[3];
imA[2] = imR.reshape(1,width).t();
imA[1] = imG.reshape(1,width).t();
imA[0] = imB.reshape(1,width).t();
// done! imf is what we want!
cv::Mat imf;
merge(imA,3,imf);
}
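Note that this sketch only builds imf inside the MEX function; to hand the result back to Matlab you would still have to convert it into an mxArray (undoing the BGR ordering and the interleaving), which the mexopencv route above handles for you.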
I have an image with something inside on a white background. I want to save that image in a format that supports an alpha channel, or use an alpha mask, so that the white pixels become transparent. Any light out there?
I don't know of any libraries where this is super easy. But, there's a lot of relevant sample code in the GLImageProcessing example here. (I haven't run the following)
UIImage *some_image = [UIImage imageNamed:@"somethin'.tiff"];
CGImageRef cg_image = some_image.CGImage;
CFDataRef data = CGDataProviderCopyData(CGImageGetDataProvider(cg_image));
size_t bpp = CGImageGetBitsPerPixel(cg_image); // expect 32 for RGBA
uint32_t *stuff = (uint32_t *)CFDataGetBytePtr(data);
int w = (int)CGImageGetWidth(cg_image);
int h = (int)CGImageGetHeight(cg_image);
int N = w * h;
for (int i = 0; i < N; i++) {
// do your stuff here: test for white, set the alpha mask
stuff[i] = stuff[i] & ((uint32_t)0xFFFFFFFF | alpha_mask);
}
You could instead use this function
UIKIT_EXTERN NSData *UIImagePNGRepresentation(UIImage *image);
and write the data to disk. I hope this helps. Post the solution if you find it...