Image dimension, ByteBuffer size and format don't match - flutter

I'm trying to make a face recognition app in Flutter. Most of the code is taken from here. That project used Firebase ML Vision (which is now deprecated), so I followed the migration guide to Google ML Kit and changed the face detection part of the code.
Following is the code for detect function:
Future<List<Face>> detect(CameraImage image, InputImageRotation rotation) {
  final faceDetector = GoogleMlKit.vision.faceDetector(
    const FaceDetectorOptions(
      mode: FaceDetectorMode.accurate,
      enableLandmarks: true,
    ),
  );
  return faceDetector.processImage(
    InputImage.fromBytes(
      bytes: image.planes[0].bytes,
      inputImageData: InputImageData(
        inputImageFormat: InputImageFormatMethods.fromRawValue(image.format.raw)!,
        size: Size(image.width.toDouble(), image.height.toDouble()),
        imageRotation: rotation,
        planeData: image.planes.map(
          (Plane plane) {
            return InputImagePlaneMetadata(
              bytesPerRow: plane.bytesPerRow,
              height: plane.height,
              width: plane.width,
            );
          },
        ).toList(),
      ),
    ),
  );
}
When I call this function, I get the error from the question title: Image dimension, ByteBuffer size and format don't match.
I'm unable to figure out where I'm doing something wrong.
Here's the _initializeCamera function (the detect function is called inside it):
void _initializeCamera() async {
  CameraDescription description = await getCamera(_direction);
  InputImageRotation rotation = rotationIntToImageRotation(
    description.sensorOrientation,
  );
  _camera = CameraController(description, ResolutionPreset.ultraHigh,
      enableAudio: false);
  await _camera!.initialize();
  await loadModel();
  //await Future.delayed(const Duration(milliseconds: 500));
  tempDir = await getApplicationDocumentsDirectory();
  String _embPath = tempDir!.path + '/emb.json';
  jsonFile = File(_embPath);
  if (jsonFile!.existsSync()) data = json.decode(jsonFile!.readAsStringSync());
  _camera!.startImageStream((CameraImage image) async {
    if (_camera != null) {
      if (_isDetecting) {
        return;
      }
      _isDetecting = true;
      String res;
      dynamic finalResult = Multimap<String, Face>();
      List<Face> faces = await detect(image, rotation); // <-- detect function
      if (faces.isEmpty) {
        _faceFound = false;
      } else {
        _faceFound = true;
      }
      Face _face;
      imglib.Image convertedImage = _convertCameraImage(image, _direction);
      for (_face in faces) {
        double x, y, w, h;
        x = (_face.boundingBox.left - 10);
        y = (_face.boundingBox.top - 10);
        w = (_face.boundingBox.width + 10);
        h = (_face.boundingBox.height + 10);
        imglib.Image croppedImage = imglib.copyCrop(
            convertedImage, x.round(), y.round(), w.round(), h.round());
        croppedImage = imglib.copyResizeCropSquare(croppedImage, 112);
        // int startTime = new DateTime.now().millisecondsSinceEpoch;
        res = _recog(croppedImage);
        // int endTime = new DateTime.now().millisecondsSinceEpoch;
        // print("Inference took ${endTime - startTime}ms");
        finalResult.add(res, _face);
      }
      setState(() {
        _scanResults = finalResult;
      });
      _isDetecting = false;
    }
  });
}
EDIT: I finally got the solution
The following "detect" function solved the problem for me:
Future<List<Face>> detect(CameraImage image, InputImageRotation rotation) {
  final faceDetector = GoogleMlKit.vision.faceDetector(
    const FaceDetectorOptions(
      mode: FaceDetectorMode.accurate,
      enableLandmarks: true,
    ),
  );
  final WriteBuffer allBytes = WriteBuffer();
  for (final Plane plane in image.planes) {
    allBytes.putUint8List(plane.bytes);
  }
  final bytes = allBytes.done().buffer.asUint8List();
  final Size imageSize = Size(image.width.toDouble(), image.height.toDouble());
  final inputImageFormat =
      InputImageFormatMethods.fromRawValue(image.format.raw) ??
          InputImageFormat.NV21;
  final planeData = image.planes.map(
    (Plane plane) {
      return InputImagePlaneMetadata(
        bytesPerRow: plane.bytesPerRow,
        height: plane.height,
        width: plane.width,
      );
    },
  ).toList();
  final inputImageData = InputImageData(
    size: imageSize,
    imageRotation: rotation,
    inputImageFormat: inputImageFormat,
    planeData: planeData,
  );
  return faceDetector.processImage(
    InputImage.fromBytes(
      bytes: bytes,
      inputImageData: inputImageData,
    ),
  );
}

The problem is in this call:
faceDetector.processImage(
  InputImage.fromBytes(
    bytes: image.planes[0].bytes,
    inputImageData: InputImageData(
      inputImageFormat: InputImageFormatMethods.fromRawValue(image.format.raw)!,
      size: Size(image.width.toDouble(), image.height.toDouble()),
      imageRotation: rotation,
      planeData: image.planes.map(
        (Plane plane) {
          return InputImagePlaneMetadata(
            bytesPerRow: plane.bytesPerRow,
            height: plane.height,
            width: plane.width,
          );
        },
      ).toList(),
    ),
  ),
);
The solution: instead of taking the bytes of only the first plane (image.planes[0].bytes), combine the bytes from all planes:
faceDetector.processImage(
  InputImage.fromBytes(
    bytes: Uint8List.fromList(
      image.planes.fold(
          <int>[],
          (List<int> previousValue, element) =>
              previousValue..addAll(element.bytes)),
    ),
    inputImageData: InputImageData(
      inputImageFormat: InputImageFormatMethods.fromRawValue(image.format.raw)!,
      size: Size(image.width.toDouble(), image.height.toDouble()),
      imageRotation: rotation,
      planeData: image.planes.map(
        (Plane plane) {
          return InputImagePlaneMetadata(
            bytesPerRow: plane.bytesPerRow,
            height: plane.height,
            width: plane.width,
          );
        },
      ).toList(),
    ),
  ),
);
I think this is because of the difference in how iOS and Android represent a CameraImage. On Android, a CameraImage has multiple planes and all of them contain byte data, so we have to combine them all. I am not sure how it works on iOS.
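For context, on Android the camera plugin usually delivers YUV_420_888 frames as three planes (Y, U, V), while ML Kit expects a single NV21 buffer (the full Y plane followed by interleaved V/U). Concatenating the planes happens to produce an acceptable buffer on many devices, but an explicit conversion makes the assumption visible. Here is a rough sketch; the helper name yuv420ToNv21 is mine, and it ignores row padding (i.e. it assumes bytesPerRow == width):

import 'dart:typed_data';

import 'package:camera/camera.dart';

Uint8List yuv420ToNv21(CameraImage image) {
  final int ySize = image.width * image.height;
  final Uint8List nv21 = Uint8List(ySize + ySize ~/ 2);

  // Full-resolution luma plane first.
  nv21.setRange(0, ySize, image.planes[0].bytes);

  // Then interleave V and U from the quarter-resolution chroma planes,
  // honouring their pixel stride (1 = planar, 2 = already semi-planar).
  final Uint8List u = image.planes[1].bytes;
  final Uint8List v = image.planes[2].bytes;
  final int uvStride = image.planes[1].bytesPerPixel ?? 1;
  int out = ySize;
  for (int i = 0;
      out + 1 < nv21.length &&
          i * uvStride < u.length &&
          i * uvStride < v.length;
      i++) {
    nv21[out++] = v[i * uvStride];
    nv21[out++] = u[i * uvStride];
  }
  return nv21;
}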

The answer from @mumboFromAvnotaklu worked for me and should be accepted as the answer. Below I have just updated the code to work with the latest versions of the Google ML Kit.
if (image.planes.isNotEmpty) {
  // There are usually a few planes per image, potentially worth looking
  // at some sort of best-from-provided-planes solution.
  InputImageData iid = InputImageData(
    inputImageFormat: InputImageFormatValue.fromRawValue(image.format.raw)!,
    size: Size(image.width.toDouble(), image.height.toDouble()),
    imageRotation: InputImageRotation.rotation90deg,
    planeData: image.planes
        .map((Plane plane) => InputImagePlaneMetadata(
              bytesPerRow: plane.bytesPerRow,
              height: plane.height,
              width: plane.width,
            ))
        .toList(),
  );
  Uint8List bytes = Uint8List.fromList(
    image.planes.fold(
        <int>[],
        (List<int> previousValue, element) =>
            previousValue..addAll(element.bytes)),
  );
  return InputImage.fromBytes(
    bytes: bytes,
    inputImageData: iid,
  );
}

Even the OP's solution didn't work for me; I finally found a different one.
First, change the dependency from google_ml_kit to the face-detection-specific library so that this works:
google_mlkit_face_detection: ^0.0.1
I am only including the code that needs to be changed.
InputImageData _inputImageData = InputImageData(
  imageRotation:
      _cameraService.cameraRotation ?? InputImageRotation.Rotation_0deg,
  inputImageFormat:
      InputImageFormatMethods.fromRawValue(image.format.raw) ??
          InputImageFormat.NV21,
  size: Size(image.planes[0].bytesPerRow.toDouble(), image.height.toDouble()),
  planeData: image.planes.map(
    (Plane plane) {
      return InputImagePlaneMetadata(
        bytesPerRow: plane.bytesPerRow,
        height: image.height,
        width: image.width,
      );
    },
  ).toList(),
);
final WriteBuffer allBytes = WriteBuffer();
for (Plane plane in image.planes) {
  allBytes.putUint8List(plane.bytes);
}
final bytes = allBytes.done().buffer.asUint8List();
InputImage _inputImage = InputImage.fromBytes(
  bytes: bytes,
  inputImageData: _inputImageData,
);
return faceDetector.processImage(_inputImage);
For more information, see the forum thread that gave me this solution.

Related

Live text recognition (region of interest)

I have live text recognition; I used the libraries https://pub.dev/packages/google_mlkit_text_recognition and https://pub.dev/packages/camera, but I have a problem: I need to detect text only in the marked part of the preview.
Get live preview function:
//
Future _processCameraImage(CameraImage image) async {
  final WriteBuffer allBytes = WriteBuffer();
  for (final Plane plane in image.planes) {
    allBytes.putUint8List(plane.bytes);
  }
  final bytes = allBytes.done().buffer.asUint8List();
  final Size imageSize = Size(image.width.toDouble(), image.height.toDouble());
  //
  final camera = cameras[_cameraIndex];
  final imageRotation =
      InputImageRotationValue.fromRawValue(camera.sensorOrientation) ??
          InputImageRotation.rotation0deg;
  final inputImageFormat =
      InputImageFormatValue.fromRawValue(image.format.raw) ??
          InputImageFormat.nv21;
  final planeData = image.planes.map(
    (Plane plane) {
      return InputImagePlaneMetadata(
        bytesPerRow: plane.bytesPerRow,
        height: plane.height,
        width: plane.width,
      );
    },
  ).toList();
  //
  final inputImageData = InputImageData(
    size: imageSize,
    imageRotation: imageRotation,
    inputImageFormat: inputImageFormat,
    planeData: planeData,
  );
  final inputImage =
      InputImage.fromBytes(bytes: bytes, inputImageData: inputImageData);
  //
  widget.onImage(inputImage);
}
Processing image function:
//
Future<void> processImage(InputImage inputImage) async {
  if (!_canProcess) return;
  if (_isBusy) return;
  _isBusy = true;
  final recognizedText = await _textRecognizer.processImage(inputImage);
  //
  if (mounted) {
    for (var element in recognizedText.blocks) {
      for (var line in element.lines) {
        for (var txt in line.elements) {
          if (txt.text.length == 17) {
            setState(() {
              _text = txt.text;
            });
          }
        }
      }
    }
  }
  _isBusy = false;
}
I had a similar task. I used the package mask_for_camera_view: create your own frame and find the values of the cropped picture. More details and a photo example are on the package's GitHub page.
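An alternative that avoids masking the camera feed is to recognize text on the full frame and then keep only the results that fall inside the marked rectangle. A minimal sketch; the helper name elementsInsideRegion is mine, and it assumes the region of interest has already been mapped from screen coordinates into image coordinates:

import 'dart:ui';

import 'package:google_mlkit_text_recognition/google_mlkit_text_recognition.dart';

/// Returns only the recognized elements whose bounding box overlaps
/// the region of interest.
List<TextElement> elementsInsideRegion(RecognizedText recognizedText, Rect roi) {
  final List<TextElement> result = [];
  for (final block in recognizedText.blocks) {
    for (final line in block.lines) {
      for (final element in line.elements) {
        if (roi.overlaps(element.boundingBox)) {
          result.add(element);
        }
      }
    }
  }
  return result;
}

In the processImage function above, you could run this filter on recognizedText before applying the txt.text.length == 17 check.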

Flutter - Android - App crashes when using faceDetector on imageStream

Whenever I run the app with the face detector in debug, profile, or release mode it works fine, but when I build the app, install it, and then start it, the app automatically closes after the camera stream starts.
I'm using google_ml_kit: ^0.7.3 and camera: ^0.9.4+14.
This is the code I use to initialize the camera and start detecting each frame:
void initCamera() async {
  final description = await availableCameras().then(
    (cameras) => cameras.firstWhere(
      (camera) => camera.lensDirection == CameraLensDirection.front,
    ),
  );
  cameraControllerNotifier.value = CameraController(
    description,
    ResolutionPreset.low,
    enableAudio: false,
  );
  await cameraControllerNotifier.value!.initialize();
  await Future.delayed(const Duration(milliseconds: 500));
  isDetecting = false;
  cameraControllerNotifier.value!.startImageStream((img) async {
    if (isDetecting) return;
    if (cameraControllerNotifier.value != null) {
      isDetecting = true;
      final image = InputImage.fromBytes(
        bytes: img.planes[0].bytes,
        inputImageData: InputImageData(
          inputImageFormat:
              InputImageFormatMethods.fromRawValue(img.format.raw)!,
          size: Size(img.width.toDouble(), img.height.toDouble()),
          imageRotation: MlHelper.rotationIntToImageRotation(
            description.sensorOrientation,
          ),
          planeData: null,
        ),
      );
      try {
        final faceDetector = GoogleMlKit.vision.faceDetector(
          const FaceDetectorOptions(
            mode: FaceDetectorMode.accurate,
            enableLandmarks: true,
          ),
        );
        List<Face> _faces = await faceDetector.processImage(image);
        if (_faces.isNotEmpty) {
          //..........
        } else {
          isClose.value = false;
        }
        isDetecting = false;
        // facesNotifier.value = _faces;
      } catch (e) {
        isClose.value = false;
        isDetecting = false;
        log("FaceKIt Error : $e");
      }
    }
  });
  if (mounted) {
    setState(() {});
  }
}

Can I use async inside a Dart Isolate? It's not working

I am using the ImageEditor package to merge different images; below is my code. It works perfectly fine without an Isolate, but when I use it with an Isolate, I get a null error.
Working code without Isolate:
startEditing() async {
  for (var i = 0; i < image1.length || i == 0; i++) {
    if (image1.isNotEmpty) {
      img1 = await File(image1[i].path).readAsBytes();
    }
    for (var i = 0; i < image2.length || i == 0; i++) {
      if (image2.isNotEmpty) {
        img2 = await File(image2[i].path).readAsBytes();
      }
      final ImageEditorOption optionGroup = ImageEditorOption();
      optionGroup.outputFormat = const OutputFormat.png(100);
      optionGroup.addOptions([
        MixImageOption(
          x: 0,
          y: 0,
          width: 1000,
          height: 1000,
          target: MemoryImageSource(img1),
        ),
        MixImageOption(
          x: 0,
          y: 0,
          width: 1000,
          height: 1000,
          target: MemoryImageSource(img2),
        ),
      ]);
      try {
        final Uint8List? result = await ImageEditor.editImage(
            image: mainImg, imageEditorOption: optionGroup);
        if (result == null) {
          image = null;
        } else {
          await saveImage(result, index);
          setState(() {
            image = MemoryImage(result);
            index++;
          });
        }
      } catch (e) {
        print(e);
      }
    }
  }
}
Code with Isolate (not working):
startEditing(SendPort sendPort) async {
  for (var i = 0; i < image1.length || i == 0; i++) {
    if (image1.isNotEmpty) {
      img1 = await File(image1[i].path).readAsBytes();
    }
    for (var i = 0; i < image2.length || i == 0; i++) {
      if (image2.isNotEmpty) {
        img2 = await File(image2[i].path).readAsBytes();
      }
      final ImageEditorOption optionGroup = ImageEditorOption();
      optionGroup.outputFormat = const OutputFormat.png(100);
      optionGroup.addOptions([
        MixImageOption(
          x: 0,
          y: 0,
          width: 1000,
          height: 1000,
          target: MemoryImageSource(img1),
        ),
        MixImageOption(
          x: 0,
          y: 0,
          width: 1000,
          height: 1000,
          target: MemoryImageSource(img2),
        ),
      ]);
      try {
        final Uint8List? result = await ImageEditor.editImage(
            image: mainImg, imageEditorOption: optionGroup);
        if (result == null) {
          image = null;
        } else {
          await saveImage(result, index);
          image = MemoryImage(result);
          index++;
          sendPort.send(image);
        }
      } catch (e) {
        print(e);
      }
    }
  }
}
saveImage method
Future<String> saveImage(Uint8List bytes, int i) async {
  final name = '${filenames[i]}';
  final result = await ImageGallerySaver.saveImage(bytes, name: name);
  print(result);
  return result['filePath'];
}
Receiving in main thread
getImageas() async {
  ReceivePort receiverPort = ReceivePort();
  final isolate = await Isolate.spawn(startEditing, receiverPort.sendPort);
  receiverPort.listen((data) {
    print('Receiving: ' + data + ', ');
  });
}
I get this error:
I/flutter (21937): Null check operator used on a null value
in this line:
final Uint8List? result = await ImageEditor.editImage(
    image: mainImg, imageEditorOption: optionGroup);
I am sure that img1, img2, mainImg, image1, and image2 are not null; I've checked a thousand times. I have also tried Flutter's compute, with the same result.
Flutter plugins that call into native code (such as image_editor) do not work in isolates spawned by Isolate.spawn.
The flutter_isolate package spawns isolates from native code for this reason. You should be able to use it to call image_editor in an isolate. (Disclaimer: I've never used flutter_isolate.)
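A minimal sketch of what that could look like, untested and with hypothetical names (editImages, runInFlutterIsolate); the only real API assumed is FlutterIsolate.spawn, which mirrors Isolate.spawn:

import 'dart:isolate';

import 'package:flutter_isolate/flutter_isolate.dart';

// flutter_isolate entry points must be top-level (or static) functions,
// annotated so the VM keeps them reachable in release builds.
@pragma('vm:entry-point')
void editImages(SendPort sendPort) async {
  // Plugins are registered inside the spawned isolate, so platform-channel
  // calls such as ImageEditor.editImage should work here.
  sendPort.send('done');
}

Future<void> runInFlutterIsolate() async {
  final receivePort = ReceivePort();
  final isolate =
      await FlutterIsolate.spawn(editImages, receivePort.sendPort);
  receivePort.listen((data) {
    print('Receiving: $data');
    isolate.kill();
  });
}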

How to add a network image in a PDF in Flutter?

Hello guys, I was trying to add a network image to a PDF. The PDF is created using the printing: ^5.5.0 plugin. I am getting a runtime error. Please help out; thanks in advance.
previewpage.dart
final pdf = pw.Document();
List<pw.ImageProvider> images = [];

Future _getImage() async {
  for (var i = 0; i < 2; i++) {
    var response = await http.get(Uri.parse("image url here"));
    final image =
        pw.RawImage(bytes: response.bodyBytes, width: 50, height: 100);
    images.add(image);
  }
  _writeOnPdf();
}
Using printing: ^5.5.0, load the image through flutterImageProvider instead:
try {
  final provider = await flutterImageProvider(NetworkImage(
      "https://miro.medium.com/max/1000/1*ilC2Aqp5sZd1wi0CopD1Hw.png"));
  images.add(provider);
} catch (e) {
  print("****ERROR: $e****");
  return;
}

pw.Center(
  child: pw.Padding(
    padding: const pw.EdgeInsets.all(3),
    child: pw.Image(
      images[i],
      width: 50,
      height: 100,
    ),
  ),
)
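For completeness, here is a minimal end-to-end sketch of the same idea: fetch the image with printing's flutterImageProvider and place it on a page (the URL is the one from the snippet above; error handling is omitted, and the function name buildPdfWithNetworkImage is mine):

import 'package:flutter/material.dart' show NetworkImage;
import 'package:pdf/widgets.dart' as pw;
import 'package:printing/printing.dart';

Future<pw.Document> buildPdfWithNetworkImage() async {
  final pdf = pw.Document();
  // flutterImageProvider converts a Flutter ImageProvider (here a
  // NetworkImage) into a pdf ImageProvider usable by pw.Image.
  final provider = await flutterImageProvider(const NetworkImage(
      "https://miro.medium.com/max/1000/1*ilC2Aqp5sZd1wi0CopD1Hw.png"));
  pdf.addPage(
    pw.Page(
      build: (context) => pw.Center(
        child: pw.Image(provider, width: 50, height: 100),
      ),
    ),
  );
  return pdf;
}

Recent versions of printing also ship a networkImage(url) helper that skips the Flutter ImageProvider step; check the package docs for whether it is available in ^5.5.0.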

Issues with image selection from disk in Flutter

I have this form where I need to upload a profilePic and a companyLogo. Earlier I faced some issues with images not updating on selection, which I fixed using imageCache.clear() and imageCache.clearLiveImages() and by passing a unique key to the Image.file widgets.
Now the problem is: if I select the profile pic and then the company logo, and send the images as Multipart in FormData, the file I selected later is used for both. That is, if I select _companyLogo after _profilePic, the _profilePic data is replaced with _companyLogo, even though the displayed images remain correct in the Image.file widgets.
// Widgets using GestureDetector to call onAddProfilePic() and onAddCompanyLogo()
File _profilePic;
Key _keyProfilePic = Key('key1');

Image.file(
  _profilePic,
  fit: BoxFit.cover,
  key: _keyProfilePic,
)

File _companyLogo;
Key _keyCompanyLogo = Key('key2');

Image.file(
  _companyLogo,
  fit: BoxFit.cover,
  key: _keyCompanyLogo,
)

onAddProfilePic() {
  Utils.selectImage(context, (newPic) {
    if (newPic != null) {
      _profilePic = newPic;
      _keyProfilePic = Key(Uuid().v4());
      setState(() {});
    }
  });
}

onAddCompanyLogo() {
  Utils.selectImage(context, (newPic) {
    if (newPic != null) {
      _companyLogo = newPic;
      _keyCompanyLogo = Key(Uuid().v4());
      setState(() {});
    }
  });
}
The selectImage function is in the Utils class:
static Future<void> selectImage(context, callback,
    {int minSize = 480, double ratioX = 1.0, double ratioY = 1.0}) async {
  int sourceSelected = await showDialog(
      context: context, builder: (context) => DialogImagePicker());
  if (sourceSelected == null) return;
  var pickedImage = await ImagePicker().getImage(
      source: sourceSelected == 0 ? ImageSource.camera : ImageSource.gallery);
  if (pickedImage == null) return;
  File croppedFile = await ImageCropper.cropImage(
    maxWidth: (minSize * ratioX).toInt(),
    maxHeight: (minSize * ratioY).toInt(),
    compressFormat: ImageCompressFormat.jpg,
    sourcePath: pickedImage.path,
    aspectRatio: CropAspectRatio(ratioX: ratioX, ratioY: ratioY),
    compressQuality: 80,
    androidUiSettings: AndroidUiSettings(
      toolbarColor: kDarkBlueColor,
      toolbarTitle: 'Crop Image',
      hideBottomControls: true,
      toolbarWidgetColor: Colors.white,
    ),
  );
  if (croppedFile == null) {
    return;
  }
  croppedFile = croppedFile.renameSync(
      path.join(path.dirname(croppedFile.path), 'image' + '.jpg'));
  print('Cropped file: $croppedFile');
  imageCache.clear();
  imageCache.clearLiveImages();
  callback(croppedFile);
}
The problem was that, after cropping, I renamed every cropped file to the same name ('image.jpg'), which made each newly selected image replace the previous one on disk. Not renaming the file fixed the issue. On top of that, no unique key or image cache clearing is required anymore.
Updated code:
File _profilePic;

Image.file(
  _profilePic,
  fit: BoxFit.cover,
)

File _companyLogo;

Image.file(
  _companyLogo,
  fit: BoxFit.cover,
)

onAddProfilePic() {
  Utils.selectImage(context, (newPic) {
    if (newPic != null) {
      _profilePic = newPic;
      setState(() {});
    }
  });
}

onAddCompanyLogo() {
  Utils.selectImage(context, (newPic) {
    if (newPic != null) {
      _companyLogo = newPic;
      setState(() {});
    }
  });
}

static Future<void> selectImage(context, callback,
    {int minSize = 480, double ratioX = 1.0, double ratioY = 1.0}) async {
  int sourceSelected = await showDialog(
      context: context, builder: (context) => DialogImagePicker());
  if (sourceSelected == null) return;
  var pickedImage = await ImagePicker().getImage(
      source: sourceSelected == 0 ? ImageSource.camera : ImageSource.gallery);
  if (pickedImage == null) return;
  File croppedFile = await ImageCropper.cropImage(
    maxWidth: (minSize * ratioX).toInt(),
    maxHeight: (minSize * ratioY).toInt(),
    compressFormat: ImageCompressFormat.jpg,
    sourcePath: pickedImage.path,
    aspectRatio: CropAspectRatio(ratioX: ratioX, ratioY: ratioY),
    compressQuality: 80,
    androidUiSettings: AndroidUiSettings(
      toolbarColor: kDarkBlueColor,
      toolbarTitle: 'Crop Image',
      hideBottomControls: true,
      toolbarWidgetColor: Colors.white,
    ),
  );
  if (croppedFile == null) {
    return;
  }
  // croppedFile = croppedFile.renameSync(
  //     path.join(path.dirname(croppedFile.path), 'image' + '.jpg'));
  callback(croppedFile);
}
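If a rename is still needed (for example to force a .jpg extension), giving each cropped file a unique name avoids the collision. A small sketch reusing the Uuid and path imports already present in the original code:

// Hypothetical variant of the removed rename: a unique name per selection
// means a later crop can never overwrite an earlier one on disk.
final uniqueName = '${Uuid().v4()}.jpg';
croppedFile = croppedFile.renameSync(
    path.join(path.dirname(croppedFile.path), uniqueName));
callback(croppedFile);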
}