Convert DICOM image pixel value to Unity color

I use SimpleITK to read a DICOM image in Unity. I create a list of Unity Texture2D objects to store the DICOM slices. My question is: how can I convert a pixel value from the DICOM image to a Unity Color?
List<Texture2D> _imageListTexture = new List<Texture2D>();
for (int k = 0; k < depth; k++)
{
    Texture2D _myTex = new Texture2D(width, height, TextureFormat.ARGB32, true);
    for (int j = 0; j < height; j++)
    {
        for (int i = 0; i < width; i++)
        {
            int idx = i + width * (j + height * k);
            float pixel = liverVoxels[idx]; // We have the pixel value
            Color32 _col = new Color32();   // Unity wants RGBA but we have only one value
            // What to do here?
            _myTex.SetPixel(i, j, _col);
        }
    }
    _myTex.Apply();
    _imageListTexture.Add(_myTex);
}

It's very simple, like this (Color32 stores bytes, so cast the value; this assumes the pixel value is already in the 0–255 range):
float pixel = liverVoxels[idx];           // we have the pixel value
byte v = (byte)pixel;                     // Color32 channels are bytes
Color32 _col = new Color32(v, v, v, 255); // grayscale: same value for R, G and B
_myTex.SetPixel(i, j, _col);
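If the raw DICOM values are not already in 0–255 (CT data, for example, typically spans a much wider range such as Hounsfield units), normalize them first. Below is a minimal sketch assuming a simple linear window; windowCenter and windowWidth are illustrative values, not something from the question — pick them for your data or derive them from the volume's min/max:

// Minimal sketch: map a raw intensity to 0-255 with a linear window.
// windowCenter / windowWidth are assumptions, not values from the question.
float windowCenter = 40f;
float windowWidth = 400f;
float low = windowCenter - windowWidth / 2f;

float t = Mathf.Clamp01((pixel - low) / windowWidth); // normalized to 0..1
byte v = (byte)(t * 255f);
_myTex.SetPixel(i, j, new Color32(v, v, v, 255));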

Related

How to square crop a cameraImage in Flutter

I'm creating a scanner and I needed to implement a square overlay on the camera preview. I take the image stream from the camera preview and send it to an API; now, after adding the square overlay, I need to square-crop the cameraImage taken from the camera preview before sending it to the API.
I only have the cameraImage, which is in YUV 420 format. How can I crop it programmatically?
I guess that for the full image you wrote something like this:
Uint8List getBytes() {
  final WriteBuffer allBytes = WriteBuffer();
  for (final Plane plane in cameraImage.planes) {
    allBytes.putUint8List(plane.bytes);
  }
  return allBytes.done().buffer.asUint8List();
}
In fact, you are concatenating the data of the three YUV planes one after the other: all of Y, then all of U, then all of V.
As you can see on the Wikipedia page, plane Y has the same width and height as the image, while planes U and V use width/2 and height/2.
If we go byte by byte, that means the code above is similar to the following code:
int divider = 1; // for first plane: Y
for (final Plane plane in cameraImage.planes) {
  for (int i = 0; i < cameraImage.height ~/ divider; i++) {
    for (int j = 0; j < cameraImage.width ~/ divider; j++) {
      allBytes.putUint8(plane.bytes[j + i * cameraImage.width ~/ divider]);
    }
  }
  divider = 2; // for planes U and V
}
Now that you're here, I think you understand how to crop:
int divider = 1; // for first plane: Y
for (final Plane plane in cameraImage.planes) {
  for (int i = cropTop ~/ divider; i < cropBottom ~/ divider; i++) {
    for (int j = cropLeft ~/ divider; j < cropRight ~/ divider; j++) {
      allBytes.putUint8(plane.bytes[j + i * cameraImage.width ~/ divider]);
    }
  }
  divider = 2; // for planes U and V
}
where the crop* variables are computed from the full image.
That's the theory: this piece of code does not take into consideration the camera orientation, possible side effects with odd sizes, or performance. But that's the general idea.

How to optimize flutter CameraImage to TensorImage?

This function is too slow. How can I efficiently convert a Flutter CameraImage to a TensorImage in Dart?
var img = imglib.Image(image.width, image.height); // Create Image buffer
Plane plane = image.planes[0];
const int shift = (0xFF << 24);
// Fill image buffer with plane[0] from YUV420_888
for (int x = 0; x < image.width; x++) {
  for (int planeOffset = 0;
      planeOffset < image.height * image.width;
      planeOffset += image.width) {
    final pixelColor = plane.bytes[planeOffset + x];
    // color: 0x FF FF FF FF
    //           A  B  G  R
    // Calculate pixel color
    var newVal =
        shift | (pixelColor << 16) | (pixelColor << 8) | pixelColor;
    img.data[planeOffset + x] = newVal;
  }
}
return img;
} // closing brace of the (omitted) enclosing function
It seems your for loop is inefficient. The data for a whole row (same planeOffset, different x) will be cached at once, so it would be faster to switch the ordering of the two loops:
for (int y = 0; y < image.height; y++) {
  for (int x = 0; x < image.width; x++) {
    final pixelColor = plane.bytes[y * image.width + x];
    // ...
  }
}
However, your code does not seem to be reading from the actual camera stream. Please refer to this thread for converting a CameraImage to an Image:
How to convert Camera Image to Image in Flutter?

Large triangular polygon with a large number of points in the mesh

When creating the mesh, I ran into a problem: a large triangular polygon appears, both in the game and in the scene view. With a grid of 250 by 250 points this polygon does not appear; at 400 by 400 points it does.
What I did:
void Start () {
    MeshFilter mf = GetComponent<MeshFilter>();
    Mesh mesh = mf.mesh;
    int size = 400;
    Vector3[] vertices = new Vector3[size * size];
    Vector3[] normals = new Vector3[size * size];
    Vector2[] uvc = new Vector2[size * size];
    int c = 0;
    for (int i = 0; i < size; i++)
    {
        for (int j = 0; j < size; j++)
        {
            vertices[c] = new Vector3(i, j, Mathf.Sin(j + i + Mathf.PI));
            normals[c] = -Vector3.forward;
            float mult = 1.0f / ((float)(size));
            uvc[c] = new Vector2(((float)(i)) * mult, ((float)(j)) * mult);
            c++;
        }
    }
    int[] triangles = new int[(size - 1) * (size - 1) * 6];
    int counter = 0;
    for (int i = 0; i < size - 1; i++)
    {
        for (int j = 0; j < size - 1; j++)
        {
            setTriangle(ref triangles, ref counter, i * size + j);
            setTriangle(ref triangles, ref counter, (i + 1) * size + j);
            setTriangle(ref triangles, ref counter, i * size + j + 1);
            setTriangle(ref triangles, ref counter, (i + 1) * size + j);
            setTriangle(ref triangles, ref counter, (i + 1) * size + j + 1);
            setTriangle(ref triangles, ref counter, i * size + j + 1);
        }
    }
    mesh.Clear();
    mesh.vertices = vertices;
    mesh.triangles = triangles;
    mesh.normals = normals;
    mesh.uv = uvc;
    mesh.RecalculateNormals();
    gameObject.transform.position = new Vector3(-55, -60, 90);
}

void setTriangle(ref int[] triangle, ref int index, int num)
{
    triangle[index++] = num;
}
And here is what I get as a result. The first picture is with 250 by 250 points, the second with 400 by 400, and the third is just a larger view.
I think you are over the maximum number of vertices for the mesh when the size is 400x400 (400 * 400 = 160,000 vertices).
The default maximum number of vertices for a mesh is 65535. If you want more, you have to set:
mesh.indexFormat = UnityEngine.Rendering.IndexFormat.UInt32;
but this is not guaranteed to be supported on all platforms.
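Applied to the code in the question, a minimal sketch of where the line would go (setting the index format early, before the vertex and triangle arrays are assigned, is the usual recommendation; the rest of Start() stays exactly as it is):

void Start () {
    MeshFilter mf = GetComponent<MeshFilter>();
    Mesh mesh = mf.mesh;
    // Allow more than 65535 vertices: switch the index buffer to 32-bit indices
    // before any vertex/triangle data is assigned.
    mesh.indexFormat = UnityEngine.Rendering.IndexFormat.UInt32;
    // ... build vertices, normals, uvc and triangles exactly as before ...
}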

How to find Center of Mass for my entire binary image?

I'm interested in finding the coordinates (X,Y) of the center of mass of my entire binary image, not the CoM of each component separately.
How can I do this efficiently?
I guess I should use regionprops, but I couldn't find the correct way to do so.
You can define all regions as a single region for regionprops
props = regionprops( double( BW ), 'Centroid' );
Depending on the data type of BW, regionprops decides whether it should label each connected component as a different region or treat all non-zeros as a single region with several components.
Alternatively, you can compute the centroid by yourself
[y x] = find( BW );
cent = [mean(x) mean(y)];
Just iterate over all the pixels and calculate the average of their X and Y coordinates:
void centerOfMass (int[][] image, int imageWidth, int imageHeight)
{
    int SumX = 0;
    int SumY = 0;
    int num = 0;
    for (int i = 0; i < imageWidth; i++)
    {
        for (int j = 0; j < imageHeight; j++)
        {
            if (image[i][j] == WHITE)
            {
                SumX = SumX + i;
                SumY = SumY + j;
                num = num + 1;
            }
        }
    }
    SumX = SumX / num;
    SumY = SumY / num;
    // The coordinate (SumX, SumY) is the center of the image mass
}
To extend this method to grayscale images in the range [0..255]: instead of
if (image[i][j] == WHITE)
{
    SumX = SumX + i;
    SumY = SumY + j;
    num = num + 1;
}
Use the following calculation
SumX = SumX + i*image[i][j];
SumY = SumY + j*image[i][j];
num = num+image[i][j];
In this case a pixel with value 100 has 100 times the weight of a dark pixel with value 1, so dark pixels contribute only a small fraction to the center-of-mass calculation.
Please note that in this case, if your image is large, you might hit a 32-bit integer overflow, so use long sumX, sumY variables instead of int.
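For illustration, a sketch of the weighted, overflow-safe version (written in C#, which is close to the pseudocode above; the method name and the jagged-array layout are just assumptions):

// Illustrative sketch: weighted center of mass with 64-bit accumulators,
// so large images and bright values do not overflow 32-bit ints.
static (double x, double y) CenterOfMass(int[][] image, int imageWidth, int imageHeight)
{
    long sumX = 0, sumY = 0, total = 0;
    for (int i = 0; i < imageWidth; i++)
    {
        for (int j = 0; j < imageHeight; j++)
        {
            int w = image[i][j];   // the gray value acts as the weight
            sumX += (long)i * w;
            sumY += (long)j * w;
            total += w;
        }
    }
    // total == 0 means a completely black image: there is no center of mass.
    return total == 0 ? (0.0, 0.0) : ((double)sumX / total, (double)sumY / total);
}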

Transformation of coordinates, Concept

I want to convert x,y,z coordinates to polar coordinates. I am getting a (-) sign in the y coordinates. Can someone explain why I am getting it? It would be a great help.
I am reading these values (xyz, az_elev_r) from a piece of software and they can't be changed. I am just not sure of the order of the angles (az & elev). Using my code I get -y instead of y, which means there is a 180° rotation. My code is:
xyz=[-0.564 3.689 -0.735;
2.011 5.067 -1.031;
-1.181 3.943 -1.825; % Reference values
];
%% az_elev_r-->xyz
az_elev_r=[ 261.30 -11.24 3.80;
291.65 -10.692 5.548;
253.34 -23.897 4.50]; % Also Reference (degree)
az_elev_r(:,1:2)=deg2rad(az_elev_r(:,1:2));
r=az_elev_r(:,3);
az=az_elev_r(:,1);
elev=az_elev_r(:,2);
x=r.*cos(az).*cos(elev)
y=r.*sin(az).*cos(elev)
z=r.*sin(elev)
Your az_elev_r matrix is not consistent with your xyz reference.
>> [az, el, r] = cart2sph(xyz(:,1), xyz(:,2), xyz(:,3));
>> rad2deg(az)
ans =
98.6924675475501
68.3527736950233
106.673911589314
Your answers are consistent with the values returned by the sph2cart function. (The example starts with your original input, before the deg2rad replacement.)
>> [x, y, z] = sph2cart(deg2rad(az_elev_r(:,1)), deg2rad(az_elev_r(:,2)), az_elev_r(:,3))
x =
-0.563766229670505
2.01131973806906
-1.17951822049783
y =
-3.68422880893852
-5.06709019311118
-3.94153436658676
z =
-0.740692730942158
-1.02931719412937
-1.82292172199717
Incidentally, your code will be more readable if you just use the sph2cart function and work in radians, unless you are trying to understand the conversions for their own sake.
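A possible explanation for the sign flip, based purely on the numbers above: the reference azimuths equal 360° minus the cart2sph azimuths (261.30 ≈ 360 − 98.69, 291.65 ≈ 360 − 68.35, 253.34 ≈ 360 − 106.67), i.e. they appear to be measured clockwise rather than counterclockwise. With

x = r*cos(elev)*cos(az),  y = r*cos(elev)*sin(az),  z = r*sin(elev),

replacing az by 360° − az leaves x and z unchanged (cosine is even) but negates y (sine is odd), which is exactly the −y described in the question.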
OpenCV has code for conversion to polar coordinates and back. This conversion is useful for finding object rotation through correlation, or otherwise creating an object-centred, 'rotation-independent' representation of objects. It is useful to visualize each of the polar coordinates as well as their joint image. The images below should be self-explanatory: the polar plot has angle as the horizontal axis and radius as the vertical axis, so the 4 peaks correspond to the 4 corners of the input image. The code (C++ with OpenCV) is attached.
//================================
// Name : PolarCoord.cpp
// Author : V.Ivanchenko cudassimo#gmail.com
// Version :
// Copyright : Your copyright notice
// Description : Hello World in C++, Ansi-style
//======================================
#include <iostream>
#include <limits>   // for std::numeric_limits (used in polarToImg)
#include "opencv.hpp"
using namespace std;
using namespace cv;
#define VALID(x, y, w, h) ((x)>=0 && (y)>=0 && (x)<(w) && (y)<(h)) // validates index
/*
* 1. Original binary image HxW CV_8U
* |
* |
* V
* 2. Two coordinate Mats HxW CV_32F
* |
* |
* V
* 3. Visualization CV_8U
* a. gray HxW for a single coordinate image
* b. binary Rx360 for two coordinate images
*/
// convert a binary 2D image into two Mats with float coordinates
void imageToCoord(const Mat& img, Mat& X, Mat& Y, bool centered = true) {
    if (img.empty())
        return;
    int h = img.rows;
    int w = img.cols;
    X.create(h, w, CV_32F);
    Y.create(h, w, CV_32F);
    float Cx = w/2.0f;
    float Cy = h/2.0f;
    for (int i=0; i<h; ++i) {
        const uchar* img_row = img.ptr<uchar>(i);
        float* x_row = X.ptr<float>(i);
        float* y_row = Y.ptr<float>(i);
        for (int j=0; j<w; ++j) {
            if (img_row[j]>0) {
                float x = j;
                float y = i;
                if (centered) {
                    x -= Cx;
                    y -= Cy;
                }
                x_row[j] = x;
                y_row[j] = y;
            }
        } // j
    } // i
} // imageToCoord()
// convert a single float polar coord Mat to a gray image
void polarToImg(const Mat& PolarCoord, Mat& img) {
    if (PolarCoord.empty())
        return;
    int h = PolarCoord.rows;
    int w = PolarCoord.cols;
    img.create(h, w, CV_8U);
    float maxVal = std::numeric_limits<float>::min();
    // find maxVal
    for (int i=0; i<h; ++i) {
        const float* x_row = PolarCoord.ptr<float>(i);
        for (int j=0; j<w; ++j) {
            if (maxVal < x_row[j])
                maxVal = x_row[j];
        } // j
    } // i
    // create an image
    if (maxVal>0) {
        float k = 255.0/maxVal;
        for (int i=0; i<h; ++i) {
            uchar* img_row = img.ptr<uchar>(i);
            const float* x_row = PolarCoord.ptr<float>(i);
            for (int j=0; j<w; ++j) {
                img_row[j] = saturate_cast<uchar>(k*x_row[j]);
            } // j
        } // i
    } // if
} // polarToImg()
// convert two polar coord Mats to a binary image
void polarToImg(const Mat& radius, const Mat& angle, Mat& img) {
    if (angle.empty() || radius.empty())
        return;
    int h = angle.rows;
    int w = angle.cols;
    assert(radius.cols==w && radius.rows==h);
    const int imgH = sqrt(h*h+w*w)+0.5f; // radius
    const int imgW = 360;                // angle, deg
    img.create(imgH, imgW, CV_8U);
    // create an image
    for (int i=0; i<h; ++i) {
        const float* ang_row = angle.ptr<float>(i);
        const float* r_row = radius.ptr<float>(i);
        for (int j=0; j<w; ++j) {
            int x = ang_row[j] + 0.5f;
            int y = r_row[j] + 0.5f;
            if (x>0) {
                cout<<x<<endl;
            }
            if (VALID(x, y, imgW, imgH))
                img.at<uchar>(y, x) = 255;
            else {
                cout<<"Invalid x, y: "<<x<<", "<<y<<endl;
            }
        } // j
    } // i
} // polarToImg()
int main() {
    cout << "Cartesian to polar" << endl;
    const int W=400, H=400;
    Mat Minput(H, W, CV_8U);
    Minput(Rect(W/4, H/4, W/2, H/2)) = 255;
    Mat X, Y, Angle, Radius, Mr, Mang, Mpolar;
    // processing
    imageToCoord(Minput, X, Y);             // extract coordinates
    cartToPolar(X, Y, Radius, Angle, true); // convert coordinates
    // visualize
    polarToImg(Radius, Mr);
    polarToImg(Angle, Mang);
    polarToImg(Radius, Angle, Mpolar);
    // debug
    //cout<<Mpolar<<endl;
    namedWindow("input", 0);
    namedWindow("angle", 0);
    namedWindow("radius", 0);
    namedWindow("Polar", 0);
    const int winw=200, winh=200;
    resizeWindow("input", winw, winh);
    resizeWindow("angle", winw, winh);
    resizeWindow("radius", winw, winh);
    resizeWindow("Polar", 360, (int)sqrt(H*H + W*W));
    moveWindow("input", 0, 0);
    moveWindow("angle", winw, 0);
    moveWindow("radius", 2*winw, 0);
    moveWindow("Polar", 3*winw, 0);
    imshow("input", Minput);
    imshow("angle", Mang);
    imshow("radius", Mr);
    imshow("Polar", Mpolar);
    waitKey(-1);
    return 0;
}