How to convert RGB pixmap to ui.Image in Dart? - flutter

Currently I have a Uint8List, formatted like [R,G,B,R,G,B,...] for all the pixels of the image. And of course I have its width and height.
I found decodeImageFromPixels while searching, but it only accepts RGBA/BGRA formats. I converted my pixmap from RGB to RGBA and the function works fine.
However, my code now looks like this:
Uint8List rawPixel = raw.value.asTypedList(w * h * channel);
List<int> rgba = [];
for (int i = 0; i < rawPixel.length; i++) {
  rgba.add(rawPixel[i]);
  if ((i + 1) % 3 == 0) {
    rgba.add(255); // opaque alpha after every R,G,B triple (0 would render fully transparent)
  }
}
Uint8List rgbaList = Uint8List.fromList(rgba);
Completer<Image> c = Completer<Image>();
decodeImageFromPixels(rgbaList, w, h, PixelFormat.rgba8888, (Image img) {
  c.complete(img);
});
I have to make a new list (wasting space) and iterate through the entire list (wasting time).
This is too inefficient in my opinion; is there any way to make it more elegant, like adding a new PixelFormat.rgb888?
Thanks in advance.

You may find that this loop is faster, as it avoids repeatedly appending to a growable list and then copying it at the end.
final rawPixel = raw.value.asTypedList(w * h * channel);
// create the Uint8List directly, since we know the width and height
final rgbaList = Uint8List(w * h * 4);
for (var i = 0; i < w * h; i++) {
  final rgbOffset = i * 3;
  final rgbaOffset = i * 4;
  rgbaList[rgbaOffset] = rawPixel[rgbOffset]; // red
  rgbaList[rgbaOffset + 1] = rawPixel[rgbOffset + 1]; // green
  rgbaList[rgbaOffset + 2] = rawPixel[rgbOffset + 2]; // blue
  rgbaList[rgbaOffset + 3] = 255; // alpha
}
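If the copy is still a bottleneck after that, a further micro-optimization (a sketch, not measured here) is to write whole 32-bit pixels through a Uint32List view of the same buffer. This assumes a little-endian host, which holds on all current Flutter targets:

final rgbaList = Uint8List(w * h * 4);
final words = Uint32List.view(rgbaList.buffer); // same memory, 32-bit lanes
for (var i = 0; i < w * h; i++) {
  final o = i * 3;
  // On little-endian hardware this stores bytes as R, G, B, 0xFF in memory,
  // matching PixelFormat.rgba8888.
  words[i] = rawPixel[o] | (rawPixel[o + 1] << 8) | (rawPixel[o + 2] << 16) | 0xFF000000;
}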
An alternative is to prepend the array with a BMP header by adapting this answer (though it would be simpler, as there would be no palette) and passing that bitmap to instantiateImageCodec, as that code is presumably highly optimized for parsing bitmaps.
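For reference, a minimal sketch of that BMP route, assuming a 24-bit BI_RGB header with a negative height so rows can be written top-down (whether every platform's BMP decoder accepts top-down bitmaps is an assumption worth testing); decodeRgbAsBmp is a hypothetical helper:

import 'dart:typed_data';
import 'dart:ui' as ui;

// Sketch only: wrap raw RGB bytes in a 24-bit BI_RGB BMP header so the
// engine's own decoder can parse them. `rgb` is w*h*3 bytes, row-major.
Future<ui.Image> decodeRgbAsBmp(Uint8List rgb, int w, int h) async {
  const headerSize = 54; // 14-byte file header + 40-byte BITMAPINFOHEADER
  final rowStride = (w * 3 + 3) & ~3; // BMP rows are padded to 4 bytes
  final fileSize = headerSize + rowStride * h;
  final bmp = Uint8List(fileSize);
  final bd = ByteData.view(bmp.buffer);
  bmp[0] = 0x42; // 'B'
  bmp[1] = 0x4D; // 'M'
  bd.setUint32(2, fileSize, Endian.little);
  bd.setUint32(10, headerSize, Endian.little); // offset to pixel data
  bd.setUint32(14, 40, Endian.little); // BITMAPINFOHEADER size
  bd.setInt32(18, w, Endian.little);
  bd.setInt32(22, -h, Endian.little); // negative height = top-down rows
  bd.setUint16(26, 1, Endian.little); // color planes
  bd.setUint16(28, 24, Endian.little); // bits per pixel
  bd.setUint32(34, rowStride * h, Endian.little); // image size (BI_RGB)
  for (var y = 0; y < h; y++) {
    var src = y * w * 3;
    var dst = headerSize + y * rowStride;
    for (var x = 0; x < w; x++) {
      bmp[dst] = rgb[src + 2];     // BMP stores B
      bmp[dst + 1] = rgb[src + 1]; // then G
      bmp[dst + 2] = rgb[src];     // then R
      src += 3;
      dst += 3;
    }
  }
  final codec = await ui.instantiateImageCodec(bmp);
  return (await codec.getNextFrame()).image;
}

Note that BMP wants B,G,R byte order and 4-byte-aligned rows, so this still copies every pixel once; the win, if any, comes from the decoder doing the RGBA expansion natively.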

Related

How to convert CameraImage first to jpeg and after to base64 Flutter

Is it possible to convert a CameraImage (yuv420 or bgra) to JPEG and then encode it in base64?
List<String> convertFaceImages(Map<String, CameraImage> images) {
  final sortedImages = sortImages(images);
  List<String> base64Images = [];
  List<img.Image> listImage = [];
  for (CameraImage image in sortedImages) {
    var yuv420Image = img.Image(image.width, image.height); // Create Image buffer
    Plane plane = image.planes[0];
    const int shift = (0xFF << 24);
    // Fill image buffer with plane[0] from YUV420_888
    for (int x = 0; x < image.width; x++) {
      for (int planeOffset = 0;
          planeOffset < image.height * image.width;
          planeOffset += image.width) {
        final pixelColor = plane.bytes[planeOffset + x];
        // color: 0x FF FF FF FF
        //           A  B  G  R
        // Calculate pixel color
        var newVal = shift | (pixelColor << 16) | (pixelColor << 8) | pixelColor;
        yuv420Image.data[planeOffset + x] = newVal;
      }
    }
    listImage.add(yuv420Image);
  }
  for (img.Image image in listImage) {
    final encodedImage = base64Encode(img.encodeJpg(image));
    logger.w(encodedImage);
    base64Images.add(encodedImage);
  }
  return base64Images;
}
I tried to do it with this code, but when I try to decode the image on the backend (or at https://base64.guru/converter/decode/image) I just get a black image. Moreover, when I tried to display these images in my app, they were black-and-white and rotated 90 degrees.
Can someone please help? I've been trying to get this right for a very long time.
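For what it's worth, the black-and-white result is expected with this approach, since only plane 0 (the luminance plane) is read; a full YUV-to-RGB conversion like the one in the next question restores the colour, and the rotation is the usual sensor-orientation issue. Once you do have a colour img.Image, the JPEG/base64 step itself is short. A minimal sketch (jpegBase64 is a hypothetical helper):

import 'dart:convert' show base64Encode;
import 'package:image/image.dart' as img;

// JPEG-encode an already-converted colour image and wrap it in base64
// so it is transport-safe for a JSON or WebSocket backend.
String jpegBase64(img.Image rgbImage) {
  return base64Encode(img.encodeJpg(rgbImage));
}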

Convert YUV420 to RGB flutter

const shift = (0xFF << 24);

Future<Image> convertYUV420toImageColor(CameraImage image) async {
  try {
    final int width = image.width;
    final int height = image.height;
    final int uvRowStride = image.planes[1].bytesPerRow;
    final int uvPixelStride = image.planes[1].bytesPerPixel;
    print("uvRowStride: " + uvRowStride.toString());
    print("uvPixelStride: " + uvPixelStride.toString());
    // imgLib -> Image package from https://pub.dartlang.org/packages/image
    var img = imglib.Image(width, height); // Create Image buffer
    // Fill image buffer with plane[0] from YUV420_888
    for (int x = 0; x < width; x++) {
      for (int y = 0; y < height; y++) {
        final int uvIndex = uvPixelStride * (x / 2).floor() + uvRowStride * (y / 2).floor();
        final int index = y * width + x;
        final yp = image.planes[0].bytes[index];
        final up = image.planes[1].bytes[uvIndex];
        final vp = image.planes[2].bytes[uvIndex];
        // Calculate pixel color
        int r = (yp + vp * 1436 / 1024 - 179).round().clamp(0, 255);
        int g = (yp - up * 46549 / 131072 + 44 - vp * 93604 / 131072 + 91).round().clamp(0, 255);
        int b = (yp + up * 1814 / 1024 - 227).round().clamp(0, 255);
        // color: 0x FF FF FF FF
        //           A  B  G  R
        img.data[index] = shift | (b << 16) | (g << 8) | r;
      }
    }
    imglib.PngEncoder pngEncoder = new imglib.PngEncoder(level: 0, filter: 0);
    List<int> png = pngEncoder.encodeImage(img);
    muteYUVProcessing = false;
    return Image.memory(png);
  } catch (e) {
    print(">>>>>>>>>>>> ERROR:" + e.toString());
  }
  return null;
}
I have been following this code snippet from How to convert Camera Image to Image in Flutter? to convert YUV to RGB, so I can send the images via WebSockets for ML prediction.
Although the conversion works, the resulting image is rotated 90 degrees and the performance is a little slow. How can I rotate it?
Replace
  img.data[index] = shift | (b << 16) | (g << 8) | r;
with
  if (img.boundsSafe(height - y, x)) {
    img.setPixelRgba(height - y, x, r, g, b, shift);
  }
and replace
  var img = imglib.Image(width, height);
with
  var img = imglib.Image(height, width);
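Alternatively, if you'd rather keep the original loop untouched, the image package also has a copyRotate function that rotates the finished image; this sketch assumes the v3 API, and the angle sign depends on the device's sensor orientation:

// Rotate the decoded buffer instead of swapping coordinates in the loop.
final rotated = imglib.copyRotate(img, 90); // or -90, depending on the device

Rotating afterwards is simpler but touches every pixel a second time, so the coordinate swap above is the cheaper option.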
For the iOS version, the CameraImage is returned as biplanar, which has only two planes.
Quoting from image_format_group.dart:
/// Multi-plane YUV 420 format.
/// This format is a generic YCbCr format, capable of describing any 4:2:0
/// chroma-subsampled planar or semiplanar buffer (but not fully interleaved),
/// with 8 bits per color sample.
/// On Android, this is `android.graphics.ImageFormat.YUV_420_888`. See
/// https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888
/// On iOS, this is `kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange`. See
/// https://developer.apple.com/documentation/corevideo/1563591-pixel_format_identifiers/kcvpixelformattype_420ypcbcr8biplanarvideorange?language=objc
yuv420
For my approach, I use dart:ffi and link to C++ for the Android CameraImage, following the tutorial from here. The conversion between YUV420p and YUV420sp can be found here. There is no complete code for the conversion yet, nor any solution for iOS around the forum.
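For illustration, the Dart side of such an ffi bridge might look like the sketch below; the library name libyuv_convert.so and the yuv420_to_rgba symbol are hypothetical stand-ins for your own C++ code, not a published API:

import 'dart:ffi' as ffi;
import 'dart:io' show Platform;

// Hypothetical native signature; the symbol name and parameters are
// assumptions about your own C++ side.
typedef _ConvertC = ffi.Void Function(
    ffi.Pointer<ffi.Uint8> y,
    ffi.Pointer<ffi.Uint8> u,
    ffi.Pointer<ffi.Uint8> v,
    ffi.Pointer<ffi.Uint8> rgbaOut,
    ffi.Int32 width,
    ffi.Int32 height);
typedef _ConvertDart = void Function(
    ffi.Pointer<ffi.Uint8> y,
    ffi.Pointer<ffi.Uint8> u,
    ffi.Pointer<ffi.Uint8> v,
    ffi.Pointer<ffi.Uint8> rgbaOut,
    int width,
    int height);

// On Android the shared library ships in the APK; on iOS, statically
// linked symbols are resolved from the process image.
final _lib = Platform.isAndroid
    ? ffi.DynamicLibrary.open('libyuv_convert.so')
    : ffi.DynamicLibrary.process();
final yuv420ToRgba =
    _lib.lookupFunction<_ConvertC, _ConvertDart>('yuv420_to_rgba');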

How to optimize flutter CameraImage to TensorImage?

That function is too slow. How can a Flutter CameraImage be efficiently converted to a TensorImage in Dart?
var img = imglib.Image(image.width, image.height); // Create Image buffer
Plane plane = image.planes[0];
const int shift = (0xFF << 24);
// Fill image buffer with plane[0] from YUV420_888
for (int x = 0; x < image.width; x++) {
  for (int planeOffset = 0;
      planeOffset < image.height * image.width;
      planeOffset += image.width) {
    final pixelColor = plane.bytes[planeOffset + x];
    // color: 0x FF FF FF FF
    //           A  B  G  R
    // Calculate pixel color
    var newVal = shift | (pixelColor << 16) | (pixelColor << 8) | pixelColor;
    img.data[planeOffset + x] = newVal;
  }
}
return img;
}
Your for loop seems inefficient. The data for a whole row (same planeOffset, different x) is cached at once, so it would be faster to switch the ordering of the two loops.
for (int y = 0; y < image.height; y++) {
  for (int x = 0; x < image.width; x++) {
    final pixelColor = plane.bytes[y * image.width + x];
    // ...
  }
}
However, your code does not seem to be reading from the actual camera stream. Please refer to this thread for converting a CameraImage to an Image:
How to convert Camera Image to Image in Flutter?

Attempting to automatically crop to data within a List and convert that to Image

I have an image that I initialized to be all 0 values.
img.Image onlyDog = img.Image.from(decodedImage); // decodedImage is of size 720, 1080
// reset all pixels to 0.
img.fill(onlyDog, img.Color.fromRgba(0, 0, 0, 0));
The full size of the onlyDog image is 720 x 1080, but a small portion of this image has been given values based on an if condition.
To get to the pixel level I had to do:
// get pixel level information for this image via getBytes()
var pixels = decodedImage.getBytes();
// do same for the dog image
var dogPixels = onlyDog.getBytes();
The pixels I need are given values in the following loop:
for (int i = 0; i < pixels.length - 4; i += 4) {
  // pixels are in RGBA format
  var pixelColor = [pixels[i], pixels[i + 1], pixels[i + 2], pixels[i + 3]];
  var dogColor = [44, 232, 128, 255];
  if (listEquals(pixelColor, dogColor)) {
    dogPixels[i] = pixels[i]; // R
    dogPixels[i + 1] = pixels[i + 1]; // G
    dogPixels[i + 2] = pixels[i + 2]; // B
    dogPixels[i + 3] = 255; // A - keep this at max as this is the effect I want
  }
}
Now, unlike Python and other languages, images are not stored in a clean [row x column] format but rather as a flat List, so it is even more difficult to get the exact coordinates of the pixels that were set. What I want to do is crop to the content and not return the massive expanse of empty [0,0,0,0] data.
What I tried:
I created a growable list and added any pixels that are not zero; this basically extracts only the meaningful content from onlyDog:
List<int> contentPixelList = [];
// loop below looks for content to crop to
for (int i = 0; i < dogPixels.length - 4; i += 4) {
  var pixelColor = [dogPixels[i], dogPixels[i + 1], dogPixels[i + 2], dogPixels[i + 3]];
  if (!listEquals(pixelColor, [0, 0, 0, 0])) {
    contentPixelList.add(dogPixels[i]);
    contentPixelList.add(dogPixels[i + 1]);
    contentPixelList.add(dogPixels[i + 2]);
    contentPixelList.add(dogPixels[i + 3]);
    //print(dogPixels[i]);
  }
}
I then converted this to a fixed-length List, as we get an error if an Image is created from a growable list.
List<int> fixedListContent = new List<int>.from(contentPixelList, growable: false);
And an Image is created from this:
Uint8List dogCropList = new Uint8List.fromList(fixedListContent);
//no image from above list
img.Image dogCropImage = img.decodeImage(dogCropList);
The dogCropImage returned is null. Then I realized that I should have been using fromBytes, as the data I have is of type List<int>. But the problem is that Image.fromBytes() also requires height and width arguments, which I cannot know from the list I populated above.
Any help would be appreciated.
All images store their pixel data in a list/array format. Libraries just exist to translate x and y coordinates into array indices. The conversion is pretty straightforward:
int stride = width * bytesPerPixel;
int index = (y * stride) + (x * bytesPerPixel);
Using this, it's pretty simple to modify pixels in a list of bytes. For example:
for (int y = startY; y <= endY; y++) {
  int yStride = y * stride;
  for (int x = startX; x <= endX; x++) {
    int index = yStride + (x * bytesPerPixel);
    // Assuming an ARGB format
    pixelData[index] = aValue;
    pixelData[index + 1] = rValue;
    pixelData[index + 2] = gValue;
    pixelData[index + 3] = bValue;
  }
}
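Applied to the cropping question above: rather than collecting non-zero pixels into a flat list (which loses the geometry), you can scan once for the bounding box of non-transparent pixels and crop to it. A sketch, assuming the image package's v3 API (getBytes() returning RGBA, and copyCrop(src, x, y, w, h)):

import 'package:image/image.dart' as img;

// Find the bounding box of non-transparent pixels and crop to it.
img.Image cropToContent(img.Image src) {
  final bytes = src.getBytes(); // RGBA bytes, row-major
  int minX = src.width, minY = src.height, maxX = -1, maxY = -1;
  for (int y = 0; y < src.height; y++) {
    for (int x = 0; x < src.width; x++) {
      final i = (y * src.width + x) * 4;
      if (bytes[i + 3] != 0) { // any non-transparent pixel counts as content
        if (x < minX) minX = x;
        if (x > maxX) maxX = x;
        if (y < minY) minY = y;
        if (y > maxY) maxY = y;
      }
    }
  }
  if (maxX < minX) return src; // nothing found; return unchanged
  return img.copyCrop(src, minX, minY, maxX - minX + 1, maxY - minY + 1);
}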

Save frame from TangoService_connectOnFrameAvailable

How can I save a frame via TangoService_connectOnFrameAvailable() and display it correctly on my computer? As this reference page mentions, the pixels are stored in the HAL_PIXEL_FORMAT_YV12 format. In my callback function for TangoService_connectOnFrameAvailable, I save the frame like this:
static void onColorFrameAvailable(void* context, TangoCameraId id, const TangoImageBuffer* buffer)
{
    ...
    std::ofstream fp;
    fp.open(imagefile, std::ios::out | std::ios::binary);
    int offset = 0;
    for (int i = 0; i < buffer->height * 2 + 1; i++) {
        fp.write((char*)(buffer->data + offset), buffer->width);
        offset += buffer->stride;
    }
    fp.close();
}
Then to get rid of the meta data in the first row and to display the image I run:
$ dd if="input.raw" of="new.raw" bs=1 skip=1280
$ vooya new.raw
I was careful to make sure in vooya that the channel order is yvu. The resulting output is: [image omitted]
What am I doing wrong in saving the image and displaying it?
UPDATE per Mark Mullin's response:
int offset = buffer->stride; // header offset
// copy Y channel
for (int i = 0; i < buffer->height; i++) {
    fp.write((char*)(buffer->data + offset), buffer->width);
    offset += buffer->stride;
}
// copy V channel
for (int i = 0; i < buffer->height / 2; i++) {
    fp.write((char*)(buffer->data + offset), buffer->width / 2);
    offset += buffer->stride / 2;
}
// copy U channel
for (int i = 0; i < buffer->height / 2; i++) {
    fp.write((char*)(buffer->data + offset), buffer->width / 2);
    offset += buffer->stride / 2;
}
This now shows the picture below (image omitted), but there are still some artifacts; I wonder if that's from the Tango tablet camera or my processing of the raw data... any thoughts?
I can't say exactly what you're doing wrong, and Tango images often have artifacts in them. Yours are new to me, but I often see baby blue as a color where glare seems to be bothering the deeper systems, and as the device begins to lose sync with the depth system under load, you'll often see what looks like a shiny grid (it's the IR pattern, I think). In the end, any rational attempt to handle the image with OpenCV etc. failed, so I hand-wrote the decoder with some help from the SO thread here.
That said, given that imagebuffer contains a pointer to the raw data from Tango, and various other variables like height and stride are filled in from the data received in the callback, this logic will create an RGBA map. Yes, I optimized the math in it, so it's a little ugly; its slower but functionally equivalent twin is listed second. My own experience says it's a horrible idea to try to do this decode right in the callback (I believe Tango is capable of losing sync with the flash for depth for purely spiteful reasons), so mine runs at the render stage.
Fast
uchar* pData = TangoData::cameraImageBuffer;
uchar* iData = TangoData::cameraImageBufferRGBA;
int size = (int)(TangoData::imageBufferStride * TangoData::imageBufferHeight);
float invByte = 0.0039215686274509803921568627451; // (1 / 255)
int halfi, uvOffset, halfj, uvOffsetHalfj;
float y_scaled, v_scaled, u_scaled;
int uOffset = size / 4 + size;
int halfstride = TangoData::imageBufferStride / 2;
for (int i = 0; i < TangoData::imageBufferHeight; ++i)
{
    halfi = i / 2;
    uvOffset = halfi * halfstride;
    for (int j = 0; j < TangoData::imageBufferWidth; ++j)
    {
        halfj = j / 2;
        uvOffsetHalfj = uvOffset + halfj;
        y_scaled = pData[i * TangoData::imageBufferStride + j] * invByte;
        v_scaled = 2 * (pData[uvOffsetHalfj + size] * invByte - 0.5f) * Vmax;
        u_scaled = 2 * (pData[uvOffsetHalfj + uOffset] * invByte - 0.5f) * Umax;
        *iData++ = (uchar)((y_scaled + 1.13983f * v_scaled) * 255.0);
        *iData++ = (uchar)((y_scaled - 0.39465f * u_scaled - 0.58060f * v_scaled) * 255.0);
        *iData++ = (uchar)((y_scaled + 2.03211f * u_scaled) * 255.0);
        *iData++ = 255;
    }
}
Understandable
for (int i = 0; i < TangoData::imageBufferHeight; ++i)
{
    for (int j = 0; j < TangoData::imageBufferWidth; ++j)
    {
        uchar y = pData[i * image->stride + j];
        uchar v = pData[(i / 2) * (TangoData::imageBufferStride / 2) + (j / 2) + size];
        uchar u = pData[(i / 2) * (TangoData::imageBufferStride / 2) + (j / 2) + size + (size / 4)];
        YUV2RGB(y, u, v);
        *iData++ = y;
        *iData++ = u;
        *iData++ = v;
        *iData++ = 255;
    }
}
I think there is a better way to do this if you can do it offline.
The best way to save the image is something like the following (don't forget to create the Pictures folder, or you won't save anything):
void onFrameAvailableRouter(void* context, TangoCameraId id, const TangoImageBuffer* buffer) {
    // Write the image to a .txt file.
    std::stringstream name_stream;
    name_stream.setf(std::ios_base::fixed, std::ios_base::floatfield);
    name_stream.precision(3);
    name_stream << "/storage/emulated/0/Pictures/"
                << cur_frame_timstamp_
                << ".txt";
    std::fstream f(name_stream.str().c_str(), std::ios::out | std::ios::binary);
    // size = 1280*720*1.5 to save YUV, or 1280*720 to save grayscale
    int size = stride_ * height_ * 1.5;
    f.write((const char*)buffer->data, size * sizeof(uint8_t));
    f.close();
}
Then, to convert the .txt file to PNG, you can do this:
import glob
from os import listdir, makedirs

import cv2
import numpy as np

inputFolder = "input"
outputFolderRGB = "output/rgb"
outputFolderGray = "output/gray"
input_filename = "timestamp.txt"
output_filename = "rgb.png"
# frame geometry: assumes 1280x720 frames (see the size comment in the C++
# code above) decoded to 4-channel BGRA
width, height, channels = 1280, 720, 4
count = 0

allFile = listdir(inputFolder)
numberOfFile = len(allFile)
if "input" in glob.glob("*"):
    if "output/rgb" in glob.glob("output/*"):
        print ""
    else:
        makedirs("output/rgb")
    if "output/gray" in glob.glob("output/*"):
        print ""
    else:
        makedirs("output/gray")
    # The output directories are ready
    for file in allFile:
        count += 1
        print "current file : ", count, "/", numberOfFile
        input_filename = file
        output_filename = input_filename[0:(len(input_filename) - 3)] + "png"
        # load file into buffer
        data = np.fromfile(inputFolder + "/" + input_filename, dtype=np.uint8)
        # To get the RGB image:
        # create yuv image
        yuv = np.ndarray((height + height / 2, width), dtype=np.uint8, buffer=data)
        # create a height x width x channels matrix with dtype uint8 for the rgb image
        img = np.zeros((height, width, channels), dtype=np.uint8)
        # convert yuv image to rgb image
        cv2.cvtColor(yuv, cv2.COLOR_YUV2BGRA_NV21, img, channels)
        cv2.imwrite(outputFolderRGB + "/" + output_filename, img)
        # If you saved the image in grayscale, use this part instead:
        # yuvReal = np.ndarray((height, width), dtype=np.uint8, buffer=data)
        # cv2.imwrite(outputFolderGray + "/" + output_filename, yuvReal)
else:
    print "not any input"
You just have to put your .txt files in a folder named input.
It's a Python script, but if you prefer a C++ version, it's very close.