I want to convert x,y,z coordinates to polar coordinates, but I am getting a negative sign in the y coordinates. Can someone explain why? It would be a great help.
I am reading these values (xyz, az_elev_r) from a software package and they can't be changed. I am just not sure of the order of the angles (az & elev). Using my code I get -y instead of y, which suggests a 180° rotation. My code is:
xyz=[-0.564 3.689 -0.735;
2.011 5.067 -1.031;
-1.181 3.943 -1.825; % Reference values
];
%% az_elev_r-->xyz
az_elev_r=[ 261.30 -11.24 3.80;
291.65 -10.692 5.548;
253.34 -23.897 4.50]; % Also Reference (degree)
az_elev_r(:,1:2)=deg2rad(az_elev_r(:,1:2));
r=az_elev_r(:,3);
az=az_elev_r(:,1);
elev=az_elev_r(:,2);
x=r.*cos(az).*cos(elev)
y=r.*sin(az).*cos(elev)
z=r.*sin(elev)
Your az_elev_r matrix is not consistent with your xyz reference.
>> [az, el, r] = cart2sph(xyz(:,1), xyz(:,2), xyz(:,3));
>> rad2deg(az)
ans =
98.6924675475501
68.3527736950233
106.673911589314
Your answers are consistent with the values returned by the sph2cart function. (The example starts with your original input, before the deg2rad replacement.)
>> [x, y, z] = sph2cart(deg2rad(az_elev_r(:,1)), deg2rad(az_elev_r(:,2)), az_elev_r(:,3))
x =
-0.563766229670505
2.01131973806906
-1.17951822049783
y =
-3.68422880893852
-5.06709019311118
-3.94153436658676
z =
-0.740692730942158
-1.02931719412937
-1.82292172199717
Incidentally, your code will be more readable if you just use the sph2cart function and work in radians, unless you are trying to understand the conversions for their own sake.
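If you do want the conversion spelled out for its own sake, here is a minimal sketch in C++ (the function name is illustrative) following the same convention as sph2cart, i.e. angles in radians and elevation measured up from the xy-plane:
#include <cmath>

// Spherical (azimuth, elevation, radius) to Cartesian, matching
// MATLAB's sph2cart convention: angles in radians, elevation
// measured from the xy-plane rather than from the z-axis.
void sphericalToCartesian(double az, double elev, double r,
                          double& x, double& y, double& z)
{
    x = r * std::cos(elev) * std::cos(az);
    y = r * std::cos(elev) * std::sin(az);
    z = r * std::sin(elev);
}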
OpenCV has code for conversion to polar coordinates and back. This conversion is useful for finding object rotation through correlation, or otherwise creating an object-centred, 'rotation-independent' representation of objects. It is useful to visualize each of the polar coordinates as well as their joint image. The images below should be self-explanatory. The polar plot has angle as the horizontal axis and radius as the vertical axis, so the 4 peaks correspond to the 4 corners of the input image. The code (C++ with OpenCV) is attached.
//================================
// Name : PolarCoord.cpp
// Author : V.Ivanchenko cudassimo@gmail.com
// Version :
// Copyright : Your copyright notice
// Description : Cartesian to polar conversion demo
//======================================
#include <cmath>
#include <iostream>
#include <limits>
#include "opencv.hpp"
using namespace std;
using namespace cv;
#define VALID(x, y, w, h) ((x)>=0 && (y)>=0 && (x)<(w) && (y)<(h)) // validates index
/*
* 1. Original binary image HxW CV_8U
* |
* |
* V
* 2. Two coordinate Mats HxW CV_32F
* |
* |
* V
* 3. Visualization CV_8U
* a. gray HxW for a single coordinate image
* b. binary Rx360 for two coordinate images
*/
// convert a binary 2D image into two Mats with float coordinates
void imageToCoord(const Mat& img, Mat& X, Mat& Y, bool centered = true) {
if (img.empty())
return;
int h = img.rows;
int w = img.cols;
X = Mat::zeros(h, w, CV_32F); // zero-fill so background pixels have defined coordinates
Y = Mat::zeros(h, w, CV_32F);
float Cx = w/2.0f;
float Cy = h/2.0f;
for (int i=0; i<h; ++i){
const uchar* img_row = img.ptr<uchar>(i);
float* x_row = X.ptr<float>(i);
float* y_row = Y.ptr<float>(i);
for (int j=0; j<w; ++j) {
if (img_row[j]>0) {
float x = j;
float y = i;
if (centered) {
x-=Cx;
y-=Cy;
}
x_row[j] = x;
y_row[j] = y;
}
} // j
} // i
} //imageToCoord()
// convert a single float polar coord Mat to a gray image
void polarToImg(const Mat& PolarCoord, Mat& img) {
if (PolarCoord.empty())
return;
int h = PolarCoord.rows;
int w = PolarCoord.cols;
img.create(h, w, CV_8U);
float maxVal = -std::numeric_limits<float>::max(); // lowest possible float
// find maxVal
for (int i=0; i<h; ++i){
const float* x_row = PolarCoord.ptr<float>(i);
for (int j=0; j<w; ++j) {
if (maxVal < x_row[j])
maxVal = x_row[j];
} // j
} // i
// create an image
if (maxVal>0) {
float k = 255.0/maxVal;
for (int i=0; i<h; ++i){
uchar* img_row = img.ptr<uchar>(i);
const float* x_row = PolarCoord.ptr<float>(i);
for (int j=0; j<w; ++j) {
img_row[j] = saturate_cast<uchar>(k*x_row[j]);
}// j
} // i
} // if
} // polarToImg()
// convert two polar coord Mats to a binary image
void polarToImg(const Mat& radius, const Mat& angle, Mat& img) {
if (angle.empty() || radius.empty())
return;
int h = angle.rows;
int w = angle.cols;
assert(radius.cols==w && radius.rows==h);
const int imgH = (int)(std::sqrt((float)(h*h + w*w)) + 0.5f); // radius
const int imgW = 360; // angle, deg
img.create(imgH, imgW, CV_8U);
// create an image
for (int i=0; i<h; ++i){
const float* ang_row = angle.ptr<float>(i);
const float* r_row = radius.ptr<float>(i);
for (int j=0; j<w; ++j) {
int x = ang_row[j] + 0.5f;
int y = r_row[j] + 0.5f;
//if (x>0) cout<<x<<endl; // debug
if (VALID(x, y, imgW, imgH))
img.at<uchar>(y, x) = 255;
else {
cout<<"Invalid x, y: "<<x<<", "<<y<<endl;
}
}// j
} // i
} // polarToImg()
int main() {
cout << "Cartesian to polar" << endl; // prints "Syntax training in openCV"
const int W=400, H=400;
Mat Minput(H, W, CV_8U);
Minput(Rect(W/4, H/4, W/2, H/2)) = 255;
Mat X, Y, Angle, Radius, Mr, Mang, Mpolar;
// processing
imageToCoord(Minput, X, Y); // extract coordinates
cartToPolar(X, Y, Radius, Angle, true); // convert coordinates; true = angles in degrees
// visualize
polarToImg(Radius, Mr);
polarToImg(Angle, Mang);
polarToImg(Radius, Angle, Mpolar);
// debug
//cout<<Mpolar<<endl;
namedWindow("input", 0);
namedWindow("angle", 0);
namedWindow("radius", 0);
namedWindow("Polar", 0);
const int winw=200, winh=200;
resizeWindow("input", winw, winh);
resizeWindow("angle", winw, winh);
resizeWindow("radius", winw, winh);
resizeWindow("Polar", 360, (int)sqrt(H*H + W*W));
moveWindow("input", 0, 0);
moveWindow("angle", winw, 0);
moveWindow("radius", 2*winw, 0);
moveWindow("Polar", 3*winw, 0);
imshow("input", Minput);
imshow("angle", Mang);
imshow("radius", Mr);
imshow("Polar", Mpolar);
waitKey(-1);
return 0;
}
I am trying to create a simple Bingo game and want to make sure the numbers do not repeat on the bingo card. I have a random number generator, but for some reason the code I'm using doesn't work: the same numbers constantly repeat. Could somebody please take a look at my code below and either tell me what I need to fix or fix it for me?
public Grid(int width, int height, float cellSize)
{
this.width = width;
this.height = height;
this.cellSize = cellSize;
gridArray = new int[width, height];
debugTextArray = new TextMesh[width, height];
for (int x = 0; x < gridArray.GetLength(0); x++)
{
for (int y = 0; y < gridArray.GetLength(1); y++)
{
debugTextArray[x, y] = UtilsClass.CreateWorldText(gridArray[x, y].ToString(), null, GetWorldPosition(x, y) + new Vector3(cellSize, cellSize) * .5f, 20, Color.white, TextAnchor.MiddleCenter);
Debug.DrawLine(GetWorldPosition(x, y), GetWorldPosition(x, y + 1), Color.white, 100f);
Debug.DrawLine(GetWorldPosition(x, y), GetWorldPosition(x + 1, y), Color.white, 100f);
}
}
Debug.DrawLine(GetWorldPosition(0, height), GetWorldPosition(width, height), Color.white, 100f);
Debug.DrawLine(GetWorldPosition(width, 0), GetWorldPosition(width, height), Color.white, 100f);
for (int x = 0; x <= 4; x++)
{
RandomValue(0, x);
RandomValue(1, x);
RandomValue(2, x);
RandomValue(3, x);
RandomValue(4, x);
}
}
private Vector3 GetWorldPosition(int x, int y)
{
return new Vector3(x, y) * cellSize;
}
public void RandomValue(int x, int y)
{
if (x >= 0 && y >= 0 && x < width && y < height)
{
list = new List<int>(new int[Lenght]);
for (int j = 0; j < 25; j++)
{
Rand = UnityEngine.Random.Range(1, 50);
while (list.Contains(Rand))
{
Rand = UnityEngine.Random.Range(1, 50);
}
list[j] = Rand;
gridArray[x, y] = list[j];
}
debugTextArray[x, y].text = gridArray[x, y].ToString();
debugTextArray[2, 2].text = "Free";
}
}
Basically your concept in the RandomValue() function is correct, but the problem is that it only checks within the same column, so you have to lift the concept of RandomValue() up to the Grid() level: keep one list containing all approved values, and check Contains() against it in Grid().
But in fact you can do it all in one go.
Make sure width*height is less than maxValue (Random.Range's integer overload excludes the max).
Dictionary<Vector2Int, int> CreateBingoGrid(int width, int height, int maxValue)
{
var grid = new Dictionary<Vector2Int, int>();
for (int x = 0; x < width; x++)
{
for (int y = 0; y < height; y++)
{
var num = Random.Range(1, maxValue);
while (grid.ContainsValue(num))
{
num = Random.Range(1, maxValue);
}
grid.Add(new Vector2Int(x, y), num);
}
}
return grid;
}
As mentioned in the comment on your question, it's probably easiest to just shuffle the numbers in the range [1,50] and then take the first 25, or however many you want.
The reason your code isn't working properly and you see a lot of repeats is that you're calling the RandomValue() function several separate times, and the list you compare against to see whether a value is already on the card lives inside that function. It only ever checks the values generated during that one call, in this case a single row.
Also, if you make a list that you know will always be the same size, you should use an array instead. Lists are for when you want the size to be adjustable.
Solution 1:
A very simple way to generate an array with the numbers 1-50 would be to do this:
//Initialize Array
int[] numbers = new int[50];
for (int i = 0; i < numbers.Length; i++)
{
numbers[i] = i + 1; // fill with 1..50
}
//Shuffle Array
for (int i = 0; i < numbers.Length; i++ )
{
int tmp = numbers[i];
int r = Random.Range(i, numbers.Length);
numbers[i] = numbers[r];
numbers[r] = tmp;
}
//Get first 'n' numbers
int[] result = new int[n];
Array.Copy(numbers, result, n);
return result;
I'm not sure if it's the most efficient way, but it would work.
Solution 2:
To change your code to check against the entire list, I would change this section:
for (int x = 0; x <= 4; x++)
{
RandomValue(0, x);
RandomValue(1, x);
RandomValue(2, x);
RandomValue(3, x);
RandomValue(4, x);
}
To something like this:
List<int> values = new List<int>();
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
int r = RandomValue(1, 51); // Random.Range's int max is exclusive, so this yields 1..50
while (values.Contains(r))
{
r = RandomValue(1, 51);
}
values.Add(r);
gridArray[x, y] = r;
}
}
int RandomValue(int min, int max) {
return UnityEngine.Random.Range(min, max);
}
Hope this helps!
I'm new to Processing. I am looking to write a program in which I select an area of an image (like the rect selection in Photoshop, for example). This area has a red stroke and is slightly opaque. Once the area is selected, the rectangle fills with the average color of the pixels in that area and the red stroke turns off. The idea is to be able to repeat this action several times on the same image. When I release the mouse, the fill is erased because of background(buff1); in draw(). I would like the rectangle filled with the new color to be kept. I think I need to use an array or a class, but I don't understand how these work. If someone is able to help me, it would be a great help. Thank you.
PImage buff1;
int x1,y1,x2,y2,h1,h2;
void setup()
{
size(1000, 721);
buff1 = loadImage("buff1.jpg");
background(buff1);
}
color extractColorFromImage(final PImage buff1) {
buff1.loadPixels();
color r = 1, g = 1, b = 1;
for (final color c : buff1.pixels) {
r += c >> 16 & 255;
g += c >> 8 & 255;
b += c & 255;
}
r /= buff1.pixels.length;
g /= buff1.pixels.length;
b /= buff1.pixels.length;
return color(r, g, b);
}
void draw()
{
background(buff1);
rectMode(CORNERS);
stroke(255,0,0);
strokeWeight(2);
strokeJoin(ROUND);
rect(x1,y1,x2,y2,2);
fill(255,0,0,50);
noStroke();
cursor(ARROW);
}
void mousePressed()
{
x1 = mouseX;
y1 = mouseY;
}
void mouseDragged()
{
x2 = mouseX;
y2 = mouseY;
}
void mouseReleased()
{
int H1 = abs(1+x2-x1);
int H2 = abs(1+y2-y1);
for (int i=0; i<width; i+=H1)
{
for (int j=0; j<height; j+=H2)
{
PImage newImg = buff1.get(x1, y1, H1, H2);
fill(extractColorFromImage(newImg), 40);
noStroke();
cursor(ARROW);
}
}
}
Once the pixel data for an image have been loaded by loadPixels(), the loaded pixels can be accessed and changed by pixels[].
updatePixels() updates the image with the data in its pixels[] array:
void mouseReleased()
{
int x_1 = min(x1,x2);
int y_1 = min(y1,y2);
int x_2 = max(x1,x2);
int y_2 = max(y1,y2);
PImage newImg = buff1.get(x_1, y_1, x_2-x_1+1, y_2-y_1+1);
color new_color = extractColorFromImage(newImg);
buff1.loadPixels();
for (int i = x_1; i <= x_2; i++)
{
for (int j = y_1; j <= y_2; j++)
{
buff1.pixels[j*buff1.width+i] = new_color;
}
}
buff1.updatePixels();
}
When the mouse button is pressed, I recommend setting (x2, y2) too:
void mousePressed()
{
x1 = mouseX;
y1 = mouseY;
x2 = x1;
y2 = y1;
}
Optionally the original color can be mixed with the new color by lerpColor():
PImage newImg = buff1.get(x_1, y_1, x_2-x_1+1, y_2-y_1+1);
color new_color = extractColorFromImage(newImg);
buff1.loadPixels();
for (int i = x_1; i <= x_2; i++)
{
for (int j = y_1; j <= y_2; j++)
{
color orig_color = buff1.pixels[j*buff1.width+i];
buff1.pixels[j*buff1.width+i] = lerpColor(orig_color, new_color, 0.5);
}
}
buff1.updatePixels();
I want to draw a bounding box on a texture.
The texture is the result of image segmentation.
I turn the segmentation results into a texture each time, and I am trying to compute a bounding box for this process.
I have pixel-specific values for the texture, but these change randomly every time,
so I tried to find the pixel values with a BFS algorithm.
public Queue<Node> SegNode = new Queue<Node>();
private bool[,] visit = new bool[256, 256];
private int[] dx = new int[4] { 0, 1, -1, 0 };
private int[] dy = new int[4] { 1, 0, 0, -1 };
public struct Node
{
public int x, y;
public float color;
public Node(int x, int y, float color)
{
this.x = x;
this.y = y;
this.color = color;
}
}
void bfs(int r, int c, float color, float[,,,] pixel)
{
Queue<Node> q = new Queue<Node>();
visit[r, c] = true; // mark the start pixel so it cannot be enqueued twice
q.Enqueue(new Node(r, c, color));
while (q.Count > 0)
{
Node curNode = q.Dequeue();
SegNode.Enqueue(curNode);
for (int i = 0; i < 4; i++)
{
int tr = curNode.x + dx[i];
int tc = curNode.y + dy[i];
if (tr >= 0 && tr < segmentationImageSize && tc >= 0 && tc < segmentationImageSize)
{
if (!visit[tr, tc] && pixel[0, tc, tr, 0] == color)
{
visit[tr, tc] = true;
q.Enqueue(new Node(tr, tc, color));
}
}
}
}
}
I also thought about finding the top and bottom that way, but it seems to be too slow.
How can I get a bounding box easily?
I am using the segmentation result values to create a texture:
Texture2D SegResTexture = new Texture2D(width, height);
for (int y = 0; y < SegResTexture.height; y++)
{
for (int x = 0; x < SegResTexture.width; x++)
{
SegResTexture.SetPixel(x, y, pixel[0, y, x, 0] < 0.1 ? maskTransparent : maskColor);
}
}
SegResTexture.Apply();
SegmentationRawIamge.GetComponent<RawImage>().texture = SegResTexture;
What I want to do is similar to the picture below.
Can you tell me how to make it or what sites to refer to?
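One simple alternative to BFS is a single pass over the mask that tracks the minimum and maximum foreground row and column. A minimal sketch, written here in C++ for a row-major float mask (the names are hypothetical; the same loop translates directly to the pixel[0, y, x, 0] array above):
#include <algorithm>

// Hypothetical single-pass bounding box over a w*h row-major mask,
// where mask[y*w + x] >= 0.1f marks a segmented pixel (the same 0.1
// threshold the texture code uses).
struct Box { int minX, minY, maxX, maxY; bool found; };

Box boundingBox(const float* mask, int w, int h)
{
    Box b = { w, h, -1, -1, false };
    for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x)
            if (mask[y*w + x] >= 0.1f) {
                b.minX = std::min(b.minX, x);
                b.minY = std::min(b.minY, y);
                b.maxX = std::max(b.maxX, x);
                b.maxY = std::max(b.maxY, y);
                b.found = true;
            }
    return b; // valid only when found is true
}
This is O(width*height) with no queue or visited array, which is as cheap as building the texture itself.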
I have an image of connected components (filled circles). If I want to segment them I can use the watershed algorithm, but I prefer writing my own watershed function instead of using the inbuilt one in OpenCV. How do I find the regional maxima of objects using OpenCV?
I wrote a function myself. My results were quite similar to MATLAB's, although not exact. This function is implemented for CV_32F, but it can easily be modified for other types.
1. I mark all the points that are not part of a minimum region by checking all the neighbors. The remaining regions are either minima, maxima or areas of inflection.
2. I use connected components to label each region.
3. I check each region for any point belonging to a maximum; if there is one, I push that label into a vector.
4. Finally I sort the bad labels, erase all duplicates and then mark all the points in the output as not minima.
All that remains are the regions of minima.
Here is the code:
// output is a binary image
// 1: not a min region
// 0: part of a min region
// 2: not sure if min or not
// 3: uninitialized
void imregionalmin(cv::Mat& img, cv::Mat& out_img)
{
// pad the border of img with 1 and copy to img_pad
cv::Mat img_pad;
cv::copyMakeBorder(img, img_pad, 1, 1, 1, 1, cv::BORDER_CONSTANT, 1);
// initialize output to 3: uninitialized
out_img = cv::Mat::ones(img.rows, img.cols, CV_8U)+2;
// initialize pointers to matrices
float* in = (float *)(img_pad.data);
uchar* out = (uchar *)(out_img.data);
// size of matrix
int in_size = img_pad.cols*img_pad.rows;
int out_size = img.cols*img.rows;
int x, y;
for (int i = 0; i < out_size; i++) {
// find x, y indexes
y = i % img.cols;
x = i / img.cols;
neighborCheck(in, out, i, x, y, img_pad.cols); // all regions are either min or max
}
cv::Mat label;
cv::connectedComponents(out_img, label);
int* lab = (int *)(label.data);
in = (float *)(img.data);
in_size = img.cols*img.rows;
std::vector<int> bad_labels;
for (int i = 0; i < out_size; i++) {
// find x, y indexes
y = i % img.cols;
x = i / img.cols;
if (lab[i] != 0) {
if (neighborCleanup(in, out, i, x, y, img.rows, img.cols) == 1) {
bad_labels.push_back(lab[i]);
}
}
}
std::sort(bad_labels.begin(), bad_labels.end());
bad_labels.erase(std::unique(bad_labels.begin(), bad_labels.end()), bad_labels.end());
for (int i = 0; i < out_size; ++i) {
if (lab[i] != 0) {
if (std::find(bad_labels.begin(), bad_labels.end(), lab[i]) != bad_labels.end()) {
out[i] = 0;
}
}
}
}
int inline neighborCleanup(float* in, uchar* out, int i, int x, int y, int x_lim, int y_lim)
{
int index;
for (int xx = x - 1; xx < x + 2; ++xx) {
for (int yy = y - 1; yy < y + 2; ++yy) {
if (((xx == x) && (yy==y)) || xx < 0 || yy < 0 || xx >= x_lim || yy >= y_lim)
continue;
index = xx*y_lim + yy;
if ((in[i] == in[index]) && (out[index] == 0))
return 1;
}
}
return 0;
}
void inline neighborCheck(float* in, uchar* out, int i, int x, int y, int x_lim)
{
int indexes[8], cur_index;
indexes[0] = x*x_lim + y;
indexes[1] = x*x_lim + y+1;
indexes[2] = x*x_lim + y+2;
indexes[3] = (x+1)*x_lim + y+2;
indexes[4] = (x + 2)*x_lim + y+2;
indexes[5] = (x + 2)*x_lim + y + 1;
indexes[6] = (x + 2)*x_lim + y;
indexes[7] = (x + 1)*x_lim + y;
cur_index = (x + 1)*x_lim + y+1;
for (int t = 0; t < 8; t++) {
if (in[indexes[t]] < in[cur_index]) {
out[i] = 0;
break;
}
}
if (out[i] == 3)
out[i] = 1;
}
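A hypothetical call, assuming an 8-bit image converted to the CV_32F type the function expects (the file name is a placeholder):
cv::Mat img8 = cv::imread("blobs.png", cv::IMREAD_GRAYSCALE);
cv::Mat img, minima;
img8.convertTo(img, CV_32F);
imregionalmin(img, minima); // zeros in minima mark regional-minimum pixels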
The following listing is a function similar to MATLAB's imregionalmax. It looks for at most nLocMax local maxima above threshold, where the found local maxima are at least minDistBtwLocMax pixels apart. It returns the actual number of local maxima found. Notice that it uses OpenCV's minMaxLoc to find global maxima. It is "opencv-self-contained" except for the (easy to implement) function vdist, which computes the (Euclidean) distance between the points (r,c) and (row,col); a sketch of it follows the listing.
input is a one-channel CV_32F matrix, and locations is an nLocMax (rows) by 2 (columns) CV_32S matrix.
int imregionalmax(Mat input, int nLocMax, float threshold, float minDistBtwLocMax, Mat locations)
{
Mat scratch = input.clone();
int nFoundLocMax = 0;
for (int i = 0; i < nLocMax; i++) {
Point location;
double maxVal;
minMaxLoc(scratch, NULL, &maxVal, NULL, &location);
if (maxVal > threshold) {
nFoundLocMax += 1;
int row = location.y;
int col = location.x;
locations.at<int>(i,0) = row;
locations.at<int>(i,1) = col;
int r0 = (row-minDistBtwLocMax > -1 ? row-minDistBtwLocMax : 0);
int r1 = (row+minDistBtwLocMax < scratch.rows ? row+minDistBtwLocMax : scratch.rows-1);
int c0 = (col-minDistBtwLocMax > -1 ? col-minDistBtwLocMax : 0);
int c1 = (col+minDistBtwLocMax < scratch.cols ? col+minDistBtwLocMax : scratch.cols-1);
for (int r = r0; r <= r1; r++) {
for (int c = c0; c <= c1; c++) {
if (vdist(Point2DMake(r, c),Point2DMake(row, col)) <= minDistBtwLocMax) {
scratch.at<float>(r,c) = 0.0;
}
}
}
} else {
break;
}
}
return nFoundLocMax;
}
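vdist and Point2DMake are not shown in the listing; here is one minimal way to fill them in, assuming Point2DMake simply builds a cv::Point2f and vdist is the Euclidean distance:
cv::Point2f Point2DMake(float r, float c)
{
    return cv::Point2f(r, c);
}

float vdist(const cv::Point2f& a, const cv::Point2f& b)
{
    // Euclidean distance between the two points
    float dr = a.x - b.x;
    float dc = a.y - b.y;
    return std::sqrt(dr*dr + dc*dc);
}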
I do not know if it is what you want, but in my answer to this post I gave some code to find local maxima (peaks) in a grayscale image (resulting from a distance transform).
The approach relies on subtracting the original image from the dilated image and finding the zero pixels.
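A minimal sketch of that idea, assuming a single-channel CV_32F input such as the output of distanceTransform:
cv::Mat localMaximaMask(const cv::Mat& img)
{
    // dilation replaces each pixel with the maximum of its 3x3 neighborhood
    cv::Mat dilated;
    cv::dilate(img, dilated, cv::Mat());
    // the dilated image equals the original exactly at neighborhood maxima,
    // so the difference is zero at the peaks
    cv::Mat peaks = ((dilated - img) == 0);
    return peaks; // 255 at local maxima, 0 elsewhere
}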
I hope it helps,
Good luck
I had the same problem some time ago, and the solution was to reimplement the imregionalmax algorithm in OpenCV/C++. It is not that complicated, because you can find the C++ source code of the function in the MATLAB distribution (somewhere in the toolbox). All you have to do is read it carefully, understand the algorithm described there, and then rewrite it, removing the MATLAB-specific checks, and you'll have it.
I need to rotate an image by a very small angle, like 1-5 degrees. Does OpenCV provide a simple way of doing that? From reading the docs I can assume that getAffineTransform() should be involved, but there is no direct example of doing something like:
IplImage *rotateImage( IplImage *source, double angle);
If you use OpenCV > 2.0 it is as easy as
using namespace cv;
Mat rotateImage(const Mat& source, double angle)
{
Point2f src_center(source.cols/2.0F, source.rows/2.0F);
Mat rot_mat = getRotationMatrix2D(src_center, angle, 1.0);
Mat dst;
warpAffine(source, dst, rot_mat, source.size());
return dst;
}
Note: angle is in degrees, not radians.
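A quick usage sketch (the file name is just a placeholder):
Mat src = imread("lena.jpg");
Mat rotated = rotateImage(src, 3.5); // 3.5 degrees counter-clockwise
imshow("rotated", rotated);
waitKey(0);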
See the C++ interface documentation for more details and adapt as you need:
getRotationMatrix2D
warpAffine
Edit: to the downvoter: please comment with the reason for downvoting tried and tested code.
#include "cv.h"
#include "highgui.h"
#include "math.h"
int main( int argc, char** argv )
{
IplImage* src = cvLoadImage("lena.jpg", 1);
IplImage* dst = cvCloneImage( src );
int delta = 1;
int angle = 0;
int opt = 1; // 1: rotate & zoom
// 0: rotate only
double factor;
cvNamedWindow("src", 1);
cvShowImage("src", src);
for(;;)
{
float m[6];
CvMat M = cvMat(2, 3, CV_32F, m);
int w = src->width;
int h = src->height;
if(opt)
factor = (cos(angle*CV_PI/180.) + 1.05) * 2;
else
factor = 1;
m[0] = (float)(factor*cos(-angle*2*CV_PI/180.));
m[1] = (float)(factor*sin(-angle*2*CV_PI/180.));
m[3] = -m[1];
m[4] = m[0];
m[2] = w*0.5f;
m[5] = h*0.5f;
cvGetQuadrangleSubPix( src, dst, &M);
cvNamedWindow("dst", 1);
cvShowImage("dst", dst);
if( cvWaitKey(1) == 27 )
break;
angle =(int)(angle + delta) % 360;
}
return 0;
}
UPDATE: See the following code for rotation using warpAffine
https://code.google.com/p/opencvjp-sample/source/browse/trunk/cpp/affine2_cpp.cpp?r=48
#include <cv.h>
#include <highgui.h>
using namespace cv;
int
main(int argc, char **argv)
{
// (1)load a specified file as a 3-channel color image,
// set its ROI, and allocate a destination image
const string imagename = argc > 1 ? argv[1] : "../image/building.png";
Mat src_img = imread(imagename);
if(!src_img.data)
return -1;
Mat dst_img = src_img.clone();
// (2)set ROI
Rect roi_rect(cvRound(src_img.cols*0.25), cvRound(src_img.rows*0.25), cvRound(src_img.cols*0.5), cvRound(src_img.rows*0.5));
Mat src_roi(src_img, roi_rect);
Mat dst_roi(dst_img, roi_rect);
// (3)with three specified parameters (rotation center, angle, scale),
// calculate an affine transformation matrix by getRotationMatrix2D
double angle = -45.0, scale = 1.0;
Point2d center(src_roi.cols*0.5, src_roi.rows*0.5);
const Mat affine_matrix = getRotationMatrix2D( center, angle, scale );
// (4)rotate the image by warpAffine taking the affine matrix
warpAffine(src_roi, dst_roi, affine_matrix, dst_roi.size(), INTER_LINEAR, BORDER_CONSTANT, Scalar::all(255));
// (5)show source and destination images with a rectangle indicating ROI
rectangle(src_img, roi_rect.tl(), roi_rect.br(), Scalar(255,0,255), 2);
namedWindow("src", CV_WINDOW_AUTOSIZE);
namedWindow("dst", CV_WINDOW_AUTOSIZE);
imshow("src", src_img);
imshow("dst", dst_img);
waitKey(0);
return 0;
}
Check my answer to a similar problem:
Rotating an image in C/C++
Essentially, use cvWarpAffine - I've described how to get the 2x3 transformation matrix from the angle in my previous answer.
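For reference, a sketch of building that 2x3 matrix by hand, rotating by angleDeg degrees about a centre (cx, cy); up to the angle's sign convention it is the same matrix getRotationMatrix2D produces with scale 1:
cv::Mat rotationMatrix2x3(double angleDeg, double cx, double cy)
{
    double a = angleDeg * CV_PI / 180.0;
    // rotate about (cx, cy): p' = R*p + (c - R*c)
    cv::Mat M = (cv::Mat_<double>(2, 3) <<
        std::cos(a), -std::sin(a), cx - cx*std::cos(a) + cy*std::sin(a),
        std::sin(a),  std::cos(a), cy - cx*std::sin(a) - cy*std::cos(a));
    return M;
}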
Updating full answer for OpenCV 2.4 and up
// ROTATE p by R
/**
* Rotate p according to rotation matrix (from getRotationMatrix2D()) R
* @param R Rotation matrix from getRotationMatrix2D()
* @param p Point2f to rotate
* @return Returns rotated coordinates in a Point2f
*/
Point2f rotPoint(const Mat &R, const Point2f &p)
{
Point2f rp;
rp.x = (float)(R.at<double>(0,0)*p.x + R.at<double>(0,1)*p.y + R.at<double>(0,2));
rp.y = (float)(R.at<double>(1,0)*p.x + R.at<double>(1,1)*p.y + R.at<double>(1,2));
return rp;
}
//COMPUTE THE SIZE NEEDED TO LOSSLESSLY STORE A ROTATED IMAGE
/**
* Return the size needed to contain bounding box bb when rotated by R
* @param R Rotation matrix from getRotationMatrix2D()
* @param bb bounding box rectangle to be rotated by R
* @return Size of image (width, height) that will completely contain bb when rotated by R
*/
Size rotatedImageBB(const Mat &R, const Rect &bb)
{
//Rotate the rectangle coordinates
vector<Point2f> rp;
rp.push_back(rotPoint(R,Point2f(bb.x,bb.y)));
rp.push_back(rotPoint(R,Point2f(bb.x + bb.width,bb.y)));
rp.push_back(rotPoint(R,Point2f(bb.x + bb.width,bb.y+bb.height)));
rp.push_back(rotPoint(R,Point2f(bb.x,bb.y+bb.height)));
//Find float bounding box r
float x = rp[0].x;
float y = rp[0].y;
float left = x, right = x, up = y, down = y;
for(int i = 1; i<4; ++i)
{
x = rp[i].x;
y = rp[i].y;
if(left > x) left = x;
if(right < x) right = x;
if(up > y) up = y;
if(down < y) down = y;
}
int w = (int)(right - left + 0.5);
int h = (int)(down - up + 0.5);
return Size(w,h);
}
/**
* Rotate region "fromroi" in image "fromI" a total of "angle" degrees and put it in "toI" if toI exists.
* If toI doesn't exist, create it such that it will hold the entire rotated region. Return toI, the rotated image.
* This will put the rotated fromroi piece of fromI into the toI image.
*
* @param fromI Input image to be rotated
* @param toI Output image if provided (else if &toI = 0, it will create a Mat, fill it with the rotated image roi, and return it)
* @param fromroi roi region in fromI to be rotated
* @param angle Angle in degrees to rotate
* @return Rotated image (you can ignore this if you passed in toI)
*/
Mat rotateImage(const Mat &fromI, Mat *toI, const Rect &fromroi, double angle)
{
//CHECK STUFF
// you should protect against bad parameters here ... omitted ...
//MAKE OR GET THE "toI" MATRIX
Point2f cx((float)fromroi.x + (float)fromroi.width/2.0,fromroi.y +
(float)fromroi.height/2.0);
Mat R = getRotationMatrix2D(cx,angle,1);
Mat rotI;
if(toI)
rotI = *toI;
else
{
Size rs = rotatedImageBB(R, fromroi);
rotI.create(rs,fromI.type());
}
//ADJUST FOR SHIFTS
double wdiff = (double)((cx.x - rotI.cols/2.0));
double hdiff = (double)((cx.y - rotI.rows/2.0));
R.at<double>(0,2) -= wdiff; //Adjust the rotation point to the middle of the dst image
R.at<double>(1,2) -= hdiff;
//ROTATE
warpAffine(fromI, rotI, R, rotI.size(), INTER_CUBIC, BORDER_CONSTANT, Scalar::all(0));
//& OUT
return(rotI);
}
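Hypothetical usage, letting the function allocate an output just big enough for the rotated region (file name and ROI values are placeholders):
Mat img = imread("input.png");
Rect roi(50, 50, 200, 150);
Mat rotated = rotateImage(img, 0, roi, 30.0); // 0: let the function create the output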
IplImage* rotate(double angle, float centreX, float centreY, IplImage* src, bool crop)
{
int w=src->width;
int h=src->height;
CvPoint2D32f centre;
centre.x = centreX;
centre.y = centreY;
CvMat* warp_mat = cvCreateMat(2, 3, CV_32FC1);
cv2DRotationMatrix(centre, angle, 1.0, warp_mat);
double m11= cvmGet(warp_mat,0,0);
double m12= cvmGet(warp_mat,0,1);
double m13= cvmGet(warp_mat,0,2);
double m21= cvmGet(warp_mat,1,0);
double m22= cvmGet(warp_mat,1,1);
double m23= cvmGet(warp_mat,1,2);
double m31= 0;
double m32= 0;
double m33= 1;
double x=0;
double y=0;
double u0= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v0= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
x=w;
y=0;
double u1= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v1= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
x=0;
y=h;
double u2= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v2= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
x=w;
y=h;
double u3= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v3= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
int left= MAX(MAX(u0,u2),0);
int right= MIN(MIN(u1,u3),w);
int top= MAX(MAX(v0,v1),0);
int bottom= MIN(MIN(v2,v3),h);
assert(left<right && top<bottom); // or throw?
if (left<right&&top<bottom)
{
IplImage* dst= cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, src->nChannels);
cvWarpAffine(src, dst, warp_mat/*, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0)*/);
if (crop) // crop and resize to initial size
{
IplImage* dst_crop= cvCreateImage(cvSize(right-left, bottom-top), IPL_DEPTH_8U, src->nChannels);
cvSetImageROI(dst,cvRect(left,top,right-left,bottom-top));
cvCopy(dst,dst_crop);
cvReleaseImage(&dst);
cvReleaseMat(&warp_mat);
//ver1
//return dst_crop;
// ver2 resize
IplImage* out= cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, src->nChannels);
cvResize(dst_crop,out);
cvReleaseImage(&dst_crop);
return out;
}
else
{
/*cvLine( dst, cvPoint(left,top),cvPoint(left, bottom), cvScalar(0, 0, 255, 0) ,1,CV_AA);
cvLine( dst, cvPoint(right,top),cvPoint(right, bottom), cvScalar(0, 0, 255, 0) ,1,CV_AA);
cvLine( dst, cvPoint(left,top),cvPoint(right, top), cvScalar(0, 0, 255, 0) ,1,CV_AA);
cvLine( dst, cvPoint(left,bottom),cvPoint(right, bottom), cvScalar(0, 0, 255, 0) ,1,CV_AA);*/
cvReleaseMat(&warp_mat);
return dst;
}
}
else
{
cvReleaseMat(&warp_mat);
return NULL; //assert?
}
}