MSER circles center

I'm using the MSER feature detector to detect all
the circles in an image, and it works perfectly,
but I also need the center of each circle.
Is there any way to get the center?
Here is the source code:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    Mat inImg = imread("C:/Profiles/suro1012/Desktop/1.bmp");
    Mat textImg;
    cvtColor(inImg, textImg, CV_BGR2GRAY);
    vector<vector<Point>> contours;
    vector<Rect> bboxes;
    Ptr<MSER> mser = MSER::create(22, (int)(0.00001*textImg.cols*textImg.rows),
                                  (int)(0.00015*textImg.cols*textImg.rows), 1, 1);
    mser->detectRegions(textImg, contours, bboxes);
    for (int i = 0; i < 1; i++)
    {
        for (int j = 0; j < contours[i].size(); j++)
            cout << " x=" << contours[i][j].x << " y=" << contours[i][j].y << endl;
        cout << endl;
    }
    for (int i = 0; i < bboxes.size(); i++)
    {
        cout << " x=" << bboxes[i].x << " y=" << bboxes[i].y << endl;
        rectangle(inImg, bboxes[i], CV_RGB(0, 255, 0));
    }
    cout << contours[0].size() << endl;
    imshow("", inImg);
    waitKey(0);
    return 0;
}
What I did:
float sumX = 0, sumY = 0;
int size = contours.size();
Point2f centroid;
if (size > 0) {
    for (int i = 0; i < size; i++)
    {
        for (int j = 0; j < contours[i].size(); j++)
        {
            sumX += contours[i][j].x;
            sumY += contours[i][j].y;
        }
        centroid.x = sumX / contours[i].size();
        centroid.y = sumY / contours[i].size();
        cout << centroid.x << " " << centroid.y << endl;
        sumX = 0;
        sumY = 0;
    }
}
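Incidentally, detectRegions also returns a bounding box per region, so each box's center gives a quick estimate of a circle's center; a minimal sketch, reusing the bboxes and inImg from the code above:

// Approximate each region's center as the center of its bounding box.
for (size_t i = 0; i < bboxes.size(); i++) {
    cv::Point2f center(bboxes[i].x + bboxes[i].width  / 2.0f,
                       bboxes[i].y + bboxes[i].height / 2.0f);
    cv::circle(inImg, center, 2, CV_RGB(255, 0, 0), -1); // mark the center
}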

You can compute the minimum enclosing circle of each contour:
(x, y),radius = cv2.minEnclosingCircle(contour)
center = (int(x),int(y))
radius = int(radius)
For more information, you can refer to https://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html
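Since the question's code is C++, the equivalent call there is cv::minEnclosingCircle; a minimal sketch, assuming the contours vector from the question:

// Fit the minimum enclosing circle of each MSER region;
// its center is the circle center the question asks for.
for (size_t i = 0; i < contours.size(); i++) {
    cv::Point2f center;
    float radius = 0.0f;
    cv::minEnclosingCircle(contours[i], center, radius);
    cv::circle(inImg, center, (int)radius, CV_RGB(0, 0, 255));
}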

Related

How to optimize flutter CameraImage to TensorImage?

This function is too slow. How can I efficiently convert a Flutter CameraImage to a TensorImage in Dart?
var img = imglib.Image(image.width, image.height); // Create Image buffer
Plane plane = image.planes[0];
const int shift = (0xFF << 24);
// Fill image buffer with plane[0] from YUV420_888
for (int x = 0; x < image.width; x++) {
  for (int planeOffset = 0;
      planeOffset < image.height * image.width;
      planeOffset += image.width) {
    final pixelColor = plane.bytes[planeOffset + x];
    // color: 0x FF FF FF FF
    //            A  B  G  R
    // Calculate pixel color
    var newVal =
        shift | (pixelColor << 16) | (pixelColor << 8) | pixelColor;
    img.data[planeOffset + x] = newVal;
  }
}
return img;
}
Your for loop ordering is inefficient. The data for a whole row (same planeOffset, different x) is cached at once, so it would be faster to swap the order of the two loops:
for (int y = 0; y < image.height; y++) {
  for (int x = 0; x < image.width; x++) {
    final pixelColor = plane.bytes[y * image.width + x];
    // ...
  }
}
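The cache effect is easy to demonstrate outside Dart as well; below is a minimal, self-contained C++ sketch (illustrative only, not part of the Flutter code) that times the two traversal orders over the same buffer:

#include <chrono>
#include <iostream>
#include <vector>

int main() {
    const int w = 4000, h = 4000;
    std::vector<unsigned char> bytes(w * h, 1);
    long long sum = 0;

    auto t0 = std::chrono::steady_clock::now();
    for (int x = 0; x < w; x++)      // column-major: jumps w bytes per access
        for (int y = 0; y < h; y++)
            sum += bytes[y * w + x];
    auto t1 = std::chrono::steady_clock::now();
    for (int y = 0; y < h; y++)      // row-major: sequential, cache-friendly
        for (int x = 0; x < w; x++)
            sum += bytes[y * w + x];
    auto t2 = std::chrono::steady_clock::now();

    std::cout << "column-major: " << std::chrono::duration<double>(t1 - t0).count() << " s\n";
    std::cout << "row-major:    " << std::chrono::duration<double>(t2 - t1).count() << " s\n";
    return (int)(sum & 1); // keep sum observable so the loops are not optimized away
}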
However, your code does not seem to be reading from the actual camera stream; please refer to this thread for converting a CameraImage to an Image:
How to convert Camera Image to Image in Flutter?

Filled rect disappear after mouse releasing

I'm new to Processing. I want to write a program in which I select an area of an image (like the rectangular selection in Photoshop, for example). The selected area has a red stroke and is slightly opaque; once the area is selected, the rectangle should fill with the average color of the pixels in that area and the red stroke should disappear. The idea is to be able to repeat this action several times on the same image. When I release the mouse, the fill is erased because of background(buff1) in draw(). I would like the rectangle filled with the new color to be kept. I think I need to use an array or a class, but I don't understand how these work. If someone is able to help me, it would be a great help. Thank you.
PImage buff1;
int x1, y1, x2, y2, h1, h2;

void setup()
{
  size(1000, 721);
  buff1 = loadImage("buff1.jpg");
  background(buff1);
}
color extractColorFromImage(final PImage buff1) {
  buff1.loadPixels();
  color r = 1, g = 1, b = 1;
  for (final color c : buff1.pixels) {
    r += c >> 020 & 255; // 020 is octal for 16: red channel
    g += c >> 010 & 255; // 010 is octal for 8: green channel
    b += c & 255;
  }
  r /= buff1.pixels.length;
  g /= buff1.pixels.length;
  b /= buff1.pixels.length;
  return color(r, g, b);
}
void draw()
{
  background(buff1);
  rectMode(CORNERS);
  stroke(255, 0, 0);
  strokeWeight(2);
  strokeJoin(ROUND);
  rect(x1, y1, x2, y2, 2);
  fill(255, 0, 0, 50);
  noStroke();
  cursor(ARROW);
}
void mousePressed()
{
  x1 = mouseX;
  y1 = mouseY;
}

void mouseDragged()
{
  x2 = mouseX;
  y2 = mouseY;
}
void mouseReleased()
{
  int H1 = abs(1+x2-x1);
  int H2 = abs(1+y2-y1);
  for (int i=0; i<width; i+=H1)
  {
    for (int j=0; j<height; j+=H2)
    {
      PImage newImg = buff1.get(x1, y1, H1, H2);
      fill(extractColorFromImage(newImg), 40);
      noStroke();
      cursor(ARROW);
    }
  }
}
Once the pixel data for an image have been loaded by loadPixels(), the loaded pixels can be accessed and changed by pixels[].
updatePixels() updates the image with the data in its pixels[] array:
void mouseReleased()
{
  int x_1 = min(x1, x2);
  int y_1 = min(y1, y2);
  int x_2 = max(x1, x2);
  int y_2 = max(y1, y2);
  PImage newImg = buff1.get(x_1, y_1, x_2-x_1+1, y_2-y_1+1);
  color new_color = extractColorFromImage(newImg);
  buff1.loadPixels();
  for (int i = x_1; i <= x_2; i++)
  {
    for (int j = y_1; j <= y_2; j++)
    {
      buff1.pixels[j*buff1.width+i] = new_color;
    }
  }
  buff1.updatePixels();
}
When the mouse button is pressed, I recommend setting (x2, y2) too:
void mousePressed()
{
  x1 = mouseX;
  y1 = mouseY;
  x2 = x1;
  y2 = y1;
}
Optionally the original color can be mixed with the new color by lerpColor():
PImage newImg = buff1.get(x_1, y_1, x_2-x_1+1, y_2-y_1+1);
color new_color = extractColorFromImage(newImg);
buff1.loadPixels();
for (int i = x_1; i <= x_2; i++)
{
  for (int j = y_1; j <= y_2; j++)
  {
    color orig_color = buff1.pixels[j*buff1.width+i];
    buff1.pixels[j*buff1.width+i] = lerpColor(orig_color, new_color, 0.5);
  }
}
buff1.updatePixels();

Get all the points in triangular ROI in a xy plane

Input: I have some 50,000 points in an xy plane, as shown in the picture below.
Now I need to get all the points that fall inside a triangular ROI. How can I do that? It can be OpenCV or MATLAB.
Below is a sample where I need to get the points of the triangular areas.
MATLAB has an inpolygon command: inpolygon.
For example, this code
xv = [0.1 0.4 0.15 0.1]; yv = [0 0.4 0.8 0];
x = rand(250,1); y = rand(250,1);
in = inpolygon(x,y,xv,yv);
plot(xv,yv,x(in),y(in),'r+',x(~in),y(~in),'bo')
generates a picture like this:
Something like this, in C++?
Mat plane = imread("plane.png"); // this is the 50 000 pixel plane image
// I assume your triangles are here. E.g. the findContours function would save them here, but I don't know your situation.
vector<vector<Point> > triangles;
// this size is something like 1000, so here are only the points that should be checked if they are inside some triangle
vector<Point> points;
// let's loop over all ROIs and check if a point is inside one of them
for (int j = 0; j < triangles.size(); j++) {
    for (int i = 0; i < points.size(); i++) {
        double test = pointPolygonTest(triangles[j], points[i], false);
        if (test < 0) {
            cout << " point " << points[i] << " is outside of triangle " << j << endl;
        } else if (test > 0) {
            cout << " point " << points[i] << " is inside of triangle " << j << endl;
        } else {
            cout << " point " << points[i] << " is on the edge of triangle " << j << endl;
        }
    }
}
For more info: http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=pointpolygontest#pointpolygontest
Here is OpenCV's example: http://docs.opencv.org/trunk/doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.html
In OpenCV, you can quickly filter out the points that are not in the minimal bounding rectangle of each triangle. These rectangles can be precomputed by hand or with cv::boundingRect(). The hit test is done with Rect::contains(). This is a fast operation (much faster than cv::pointPolygonTest), and it filters out the points that are obviously not in any triangle. Afterwards, you test the points that pass the filter with cv::pointPolygonTest().
That is:
std::vector<cv::Point> pointList;
std::vector<cv::Rect> rectList;
std::vector<std::vector<cv::Point>> triangleList;
for (int pointIndex = 0; pointIndex < pointList.size(); pointIndex++)
{
    for (int rectIndex = 0; rectIndex < rectList.size(); rectIndex++)
    {
        if (!rectList[rectIndex].contains(pointList[pointIndex]))
            continue;
        if (cv::pointPolygonTest(triangleList[rectIndex], pointList[pointIndex], false) < 0)
            continue;
        /* TODO Mark the point for future use */
    }
}
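For completeness, rectList can be filled beforehand from the triangles with cv::boundingRect(), as mentioned above; a minimal sketch:

// Precompute one bounding rectangle per triangle, so the cheap
// Rect::contains() filter runs before cv::pointPolygonTest().
rectList.reserve(triangleList.size());
for (size_t t = 0; t < triangleList.size(); t++)
    rectList.push_back(cv::boundingRect(triangleList[t]));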

imregionalmax matlab function's equivalent in opencv

I have an image of connected components (filled circles). If I want to segment them I can use the watershed algorithm. I prefer writing my own watershed function instead of using the built-in one in OpenCV. How do I find the regional maxima of objects using OpenCV?
I wrote a function myself. My results were quite similar to MATLAB's, although not exact. This function is implemented for CV_32F but it can easily be modified for other types.
First I mark all the points that are not part of a minimum region by checking all the neighbors; the remaining regions are either minima, maxima or areas of inflection.
I then use connected components to label each region, and check each region for any point belonging to a maximum; if there is one, I push that label into a vector.
Finally I sort the bad labels, erase all duplicates, and mark all the points carrying those labels in the output as not minima.
All that remains are the regions of minima.
Here is the code:
// output is a binary image
// 0: not part of a minimum region
// 1: part of a minimum region
// 3: uninitialized (every pixel is resolved to 0 or 1 by neighborCheck)
// forward declarations for the helpers defined below
int inline neighborCleanup(float* in, uchar* out, int i, int x, int y, int x_lim, int y_lim);
void inline neighborCheck(float* in, uchar* out, int i, int x, int y, int x_lim);

void imregionalmin(cv::Mat& img, cv::Mat& out_img)
{
    // pad the border of img with 1 and copy to img_pad
    cv::Mat img_pad;
    cv::copyMakeBorder(img, img_pad, 1, 1, 1, 1, cv::BORDER_CONSTANT, 1);
    // initialize output to 3, uninitialized
    out_img = cv::Mat::ones(img.rows, img.cols, CV_8U) + 2;
    // raw pointers into the matrices
    float* in = (float *)(img_pad.data);
    uchar* out = (uchar *)(out_img.data);
    // sizes of the matrices
    int in_size = img_pad.cols * img_pad.rows;
    int out_size = img.cols * img.rows;
    int x, y;
    for (int i = 0; i < out_size; i++) {
        // find x, y indexes
        y = i % img.cols;
        x = i / img.cols;
        neighborCheck(in, out, i, x, y, img_pad.cols); // all regions are either min or max
    }
    cv::Mat label;
    cv::connectedComponents(out_img, label);
    int* lab = (int *)(label.data);
    in = (float *)(img.data);
    in_size = img.cols * img.rows;
    std::vector<int> bad_labels;
    for (int i = 0; i < out_size; i++) {
        // find x, y indexes
        y = i % img.cols;
        x = i / img.cols;
        if (lab[i] != 0) {
            if (neighborCleanup(in, out, i, x, y, img.rows, img.cols) == 1) {
                bad_labels.push_back(lab[i]);
            }
        }
    }
    std::sort(bad_labels.begin(), bad_labels.end());
    bad_labels.erase(std::unique(bad_labels.begin(), bad_labels.end()), bad_labels.end());
    for (int i = 0; i < out_size; ++i) {
        if (lab[i] != 0) {
            if (std::find(bad_labels.begin(), bad_labels.end(), lab[i]) != bad_labels.end()) {
                out[i] = 0;
            }
        }
    }
}
int inline neighborCleanup(float* in, uchar* out, int i, int x, int y, int x_lim, int y_lim)
{
    int index;
    for (int xx = x - 1; xx < x + 2; ++xx) {
        for (int yy = y - 1; yy < y + 2; ++yy) {
            if (((xx == x) && (yy == y)) || xx < 0 || yy < 0 || xx >= x_lim || yy >= y_lim)
                continue;
            index = xx * y_lim + yy;
            if ((in[i] == in[index]) && (out[index] == 0))
                return 1;
        }
    }
    return 0;
}
void inline neighborCheck(float* in, uchar* out, int i, int x, int y, int x_lim)
{
    int indexes[8], cur_index;
    indexes[0] = x*x_lim + y;
    indexes[1] = x*x_lim + y + 1;
    indexes[2] = x*x_lim + y + 2;
    indexes[3] = (x + 1)*x_lim + y + 2;
    indexes[4] = (x + 2)*x_lim + y + 2;
    indexes[5] = (x + 2)*x_lim + y + 1;
    indexes[6] = (x + 2)*x_lim + y;
    indexes[7] = (x + 1)*x_lim + y;
    cur_index = (x + 1)*x_lim + y + 1;
    // if any neighbor is smaller, this pixel cannot be a minimum
    for (int t = 0; t < 8; t++) {
        if (in[indexes[t]] < in[cur_index]) {
            out[i] = 0;
            break;
        }
    }
    if (out[i] == 3)
        out[i] = 1;
}
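For reference, a minimal usage sketch for imregionalmin above (the input file name is hypothetical):

cv::Mat gray = cv::imread("blobs.png", cv::IMREAD_GRAYSCALE); // hypothetical input
cv::Mat imgf, minima;
gray.convertTo(imgf, CV_32F);  // the function is written for CV_32F
imregionalmin(imgf, minima);
// minima is now 1 on regional-minimum regions and 0 elsewhere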
The following listing is a function similar to MATLAB's "imregionalmax". It looks for at most nLocMax local maxima above threshold, where the found local maxima are at least minDistBtwLocMax pixels apart. It returns the actual number of local maxima found. Notice that it uses OpenCV's minMaxLoc to find global maxima. It is "opencv-self-contained" except for the (easy to implement) function vdist, which computes the (Euclidean) distance between the points (r,c) and (row,col); a sketch of vdist is given after the listing.
input is a one-channel CV_32F matrix, and locations is an nLocMax (rows) by 2 (columns) CV_32S matrix.
int imregionalmax(Mat input, int nLocMax, float threshold, float minDistBtwLocMax, Mat locations)
{
    Mat scratch = input.clone();
    int nFoundLocMax = 0;
    for (int i = 0; i < nLocMax; i++) {
        Point location;
        double maxVal;
        minMaxLoc(scratch, NULL, &maxVal, NULL, &location);
        if (maxVal > threshold) {
            nFoundLocMax += 1;
            int row = location.y;
            int col = location.x;
            locations.at<int>(i,0) = row;
            locations.at<int>(i,1) = col;
            // zero out a neighborhood around this maximum so that the
            // next iteration finds the next-largest peak
            int r0 = (row-minDistBtwLocMax > -1 ? row-minDistBtwLocMax : 0);
            int r1 = (row+minDistBtwLocMax < scratch.rows ? row+minDistBtwLocMax : scratch.rows-1);
            int c0 = (col-minDistBtwLocMax > -1 ? col-minDistBtwLocMax : 0);
            int c1 = (col+minDistBtwLocMax < scratch.cols ? col+minDistBtwLocMax : scratch.cols-1);
            for (int r = r0; r <= r1; r++) {
                for (int c = c0; c <= c1; c++) {
                    if (vdist(Point2DMake(r, c), Point2DMake(row, col)) <= minDistBtwLocMax) {
                        scratch.at<float>(r,c) = 0.0;
                    }
                }
            }
        } else {
            break;
        }
    }
    return nFoundLocMax;
}
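The listing uses two helpers that are not shown. A minimal sketch, under the assumption that Point2DMake simply builds a cv::Point2f and vdist is the plain Euclidean distance:

#include <cmath>            // std::sqrt
#include <opencv2/core.hpp>

// Assumed helpers for the listing above (names taken from the answer).
static inline cv::Point2f Point2DMake(float r, float c) {
    return cv::Point2f(r, c);
}
static inline float vdist(const cv::Point2f& a, const cv::Point2f& b) {
    return std::sqrt((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y));
}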
I do not know if it is what you want, but in my answer to this post I gave some code to find local maxima (peaks) in a grayscale image (resulting from a distance transform).
The approach relies on subtracting the original image from the dilated image and finding the zero pixels.
I hope it helps; good luck.
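A minimal sketch of that dilate-and-compare idea (my reconstruction under stated assumptions, not the linked answer's exact code):

// Sketch: a pixel is a local maximum iff a 3x3 dilation (max filter)
// leaves it unchanged. dist is assumed to be a CV_32F image,
// e.g. the output of cv::distanceTransform.
cv::Mat dilated, diff;
cv::dilate(dist, dilated, cv::Mat()); // default 3x3 structuring element
cv::subtract(dilated, dist, diff);    // zero exactly where nothing changed
cv::Mat peaks = (diff == 0);          // 8-bit mask: 255 at local maxima (plateaus included)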
I had the same problem some time ago, and the solution was to reimplement the imregionalmax algorithm in OpenCV/C++. It is not that complicated, because you can find the source code of the function in the MATLAB distribution (somewhere in the toolbox directory). All you have to do is read it carefully, understand the algorithm described there, and then rewrite it without the MATLAB-specific checks, and you'll have it.

Transformation of coordinates, Concept

I want to convert x, y, z coordinates to polar (spherical) coordinates, but I am getting a minus sign in the y coordinates. Can someone explain why? It would be a great help.
I am reading these values (xyz, az_elev_r) from a piece of software and they can't be changed. I am just not sure of the order of the angles (az & elev). Using my code I get -y instead of y, which means there is a 180° rotation. My code is:
xyz = [-0.564  3.689 -0.735;
        2.011  5.067 -1.031;
       -1.181  3.943 -1.825]; % Reference values
%% az_elev_r --> xyz
az_elev_r = [261.30  -11.24    3.80;
             291.65  -10.692   5.548;
             253.34  -23.897   4.50]; % Also reference (degrees)
az_elev_r(:,1:2) = deg2rad(az_elev_r(:,1:2));
r    = az_elev_r(:,3);
az   = az_elev_r(:,1);
elev = az_elev_r(:,2);
x = r.*cos(az).*cos(elev)
y = r.*sin(az).*cos(elev)
z = r.*sin(elev)
Your az_elev_r matrix is not consistent with your xyz reference.
>> [az, el, r] = cart2sph(xyz(:,1), xyz(:,2), xyz(:,3));
>> rad2deg(az)
ans =
98.6924675475501
68.3527736950233
106.673911589314
Your answers are consistent with the values returned by the sph2cart function. (The example starts from your original input, before the deg2rad conversion.)
>> [x, y, z] = sph2cart(deg2rad(az_elev_r(:,1)), deg2rad(az_elev_r(:,2)), az_elev_r(:,3))
x =
-0.563766229670505
2.01131973806906
-1.17951822049783
y =
-3.68422880893852
-5.06709019311118
-3.94153436658676
z =
-0.740692730942158
-1.02931719412937
-1.82292172199717
Incidentally, your code will be more readable if you just use the sph2cart function and work in radians, unless you are trying to understand the conversions for their own sake.
OpenCV has code for conversion to polar coordinates and back. This conversion is useful for finding object rotation through correlation, or otherwise for creating an object-centred, "rotation-independent" representation of objects. It is useful to visualize each of the polar coordinates as well as their joint image. The images below should be self-explanatory. The polar plot has angle as the horizontal axis and radius as the vertical axis, so that the 4 peaks correspond to the 4 corners of the input image. The code (C++ with OpenCV) is attached.
//================================
// Name : PolarCoord.cpp
// Author : V.Ivanchenko cudassimo#gmail.com
// Version :
// Copyright : Your copyright notice
// Description : Hello World in C++, Ansi-style
//======================================
#include <iostream>
#include "opencv.hpp"
using namespace std;
using namespace cv;
#define VALID(x, y, w, h) ((x)>=0 && (y)>=0 && (x)<(w) && (y)<(h)) // validates index
/*
* 1. Original binary image HxW CV_8U
* |
* |
* V
* 2. Two coordinate Mats HxW CV_32F
* |
* |
* V
* 3. Visualization CV_8U
* a. gray HxW for a single coordinate image
* b. binary Rx360 for two coordinate images
*/
// convert a binary 2D image into two Mats with float coordinates
void imageToCoord(const Mat& img, Mat& X, Mat& Y, bool centered = true) {
    if (img.empty())
        return;
    int h = img.rows;
    int w = img.cols;
    X.create(h, w, CV_32F);
    Y.create(h, w, CV_32F);
    float Cx = w/2.0f;
    float Cy = h/2.0f;
    for (int i = 0; i < h; ++i) {
        const uchar* img_row = img.ptr<uchar>(i);
        float* x_row = X.ptr<float>(i);
        float* y_row = Y.ptr<float>(i);
        for (int j = 0; j < w; ++j) {
            if (img_row[j] > 0) {
                float x = j;
                float y = i;
                if (centered) {
                    x -= Cx;
                    y -= Cy;
                }
                x_row[j] = x;
                y_row[j] = y;
            }
        } // j
    } // i
} // imageToCoord()
// convert a single float polar coord Mat to a gray image
void polarToImg(const Mat& PolarCoord, Mat& img) {
    if (PolarCoord.empty())
        return;
    int h = PolarCoord.rows;
    int w = PolarCoord.cols;
    img.create(h, w, CV_8U);
    float maxVal = std::numeric_limits<float>::min();
    // find maxVal
    for (int i = 0; i < h; ++i) {
        const float* x_row = PolarCoord.ptr<float>(i);
        for (int j = 0; j < w; ++j) {
            if (maxVal < x_row[j])
                maxVal = x_row[j];
        } // j
    } // i
    // create an image
    if (maxVal > 0) {
        float k = 255.0/maxVal;
        for (int i = 0; i < h; ++i) {
            uchar* img_row = img.ptr<uchar>(i);
            const float* x_row = PolarCoord.ptr<float>(i);
            for (int j = 0; j < w; ++j) {
                img_row[j] = saturate_cast<uchar>(k*x_row[j]);
            } // j
        } // i
    } // if
} // polarToImg()
// convert two polar coord Mats to a binary image
void polarToImg(const Mat& radius, const Mat& angle, Mat& img) {
    if (angle.empty() || radius.empty())
        return;
    int h = angle.rows;
    int w = angle.cols;
    assert(radius.cols == w && radius.rows == h);
    const int imgH = sqrt(h*h + w*w) + 0.5f; // radius
    const int imgW = 360;                    // angle, deg
    img.create(imgH, imgW, CV_8U);
    // create an image
    for (int i = 0; i < h; ++i) {
        const float* ang_row = angle.ptr<float>(i);
        const float* r_row = radius.ptr<float>(i);
        for (int j = 0; j < w; ++j) {
            int x = ang_row[j] + 0.5f;
            int y = r_row[j] + 0.5f;
            if (x > 0) {
                cout << x << endl;
            }
            if (VALID(x, y, imgW, imgH))
                img.at<uchar>(y, x) = 255;
            else {
                cout << "Invalid x, y: " << x << ", " << y << endl;
            }
        } // j
    } // i
} // polarToImg()
int main() {
    cout << "Cartesian to polar" << endl;
    const int W = 400, H = 400;
    Mat Minput(H, W, CV_8U);
    Minput(Rect(W/4, H/4, W/2, H/2)) = 255;
    Mat X, Y, Angle, Radius, Mr, Mang, Mpolar;
    // processing
    imageToCoord(Minput, X, Y);             // extract coordinates
    cartToPolar(X, Y, Radius, Angle, true); // convert coordinates
    // visualize
    polarToImg(Radius, Mr);
    polarToImg(Angle, Mang);
    polarToImg(Radius, Angle, Mpolar);
    // debug
    //cout << Mpolar << endl;
    namedWindow("input", 0);
    namedWindow("angle", 0);
    namedWindow("radius", 0);
    namedWindow("Polar", 0);
    const int winw = 200, winh = 200;
    resizeWindow("input", winw, winh);
    resizeWindow("angle", winw, winh);
    resizeWindow("radius", winw, winh);
    resizeWindow("Polar", 360, (int)sqrt(H*H + W*W));
    moveWindow("input", 0, 0);
    moveWindow("angle", winw, 0);
    moveWindow("radius", 2*winw, 0);
    moveWindow("Polar", 3*winw, 0);
    imshow("input", Minput);
    imshow("angle", Mang);
    imshow("radius", Mr);
    imshow("Polar", Mpolar);
    waitKey(-1);
    return 0;
}