I am trying to add skin tone to an image in flutter canvas.
I've used the following code before to apply chroma by altering pixels when I load the image:
static Future<ui.Image> applyChromaToImage(ui.Image image, String pathForImage, RGBPixel chromaToApply, {RGBPixel previousChromaColor}) async
{
  // raw RGBA bytes of the source image (assumed source of imgBytes)
  ByteData imgBytes = await image.toByteData(format: ui.ImageByteFormat.rawRgba);
  List<ChromaPointRange> chromaPoints = processChromaImageBytes(image, imgBytes);
  for (ChromaPointRange rnge in chromaPoints)
  {
    for (int y = rnge.yValStart; y <= rnge.yValEnd; y++)
    {
      RGBPixel currentPixel = RGBPixel.generatePixelFromImagePos(imgBytes, image.width, rnge.xVal, y);
      // replace the current pixel with the skin tone
      RGBPixel newPixl = currentPixel.mergeSkinTonePixel(chromaToApply, previousChromaColor);
      imgBytes.setUint32((y * image.width + rnge.xVal) * 4, newPixl.getHex());
    }
  }
  final Completer<ui.Image> imageCompleter = new Completer();
  // it looks like the endian format doesn't get set right here
  ui.PixelFormat formatToUse = Endian.host == Endian.little ? ui.PixelFormat.rgba8888 : ui.PixelFormat.bgra8888;
  ui.decodeImageFromPixels(
    imgBytes.buffer.asUint8List(),
    image.width,
    image.height,
    formatToUse,
    (ui.Image result) {
      imageCompleter.complete(result);
    },
  );
  return await imageCompleter.future;
}
static List<ChromaPointRange> processChromaImageBytes(ui.Image image, ByteData imgBytes)
{
  List<ChromaPointRange> chromaPoints = [];
  ChromaPointRange currentPoints = null;
  for (int x = 0; x < image.width; x = x + 1)
  {
    for (int y = 0; y < image.height; y = y + 1)
    {
      RGBPixel currentPixel = RGBPixel.generatePixelFromImagePos(imgBytes, image.width, x, y);
      if (currentPixel.isSkinTonePixel())
      {
        if (currentPoints == null)
        {
          currentPoints = ChromaPointRange.fromEmpty();
          currentPoints.xVal = x;
          currentPoints.yValStart = y;
        }
      }
      else if (currentPoints != null)
      {
        currentPoints.yValEnd = y - 1;
        chromaPoints.add(currentPoints);
        currentPoints = null;
      }
    }
    if (currentPoints != null)
    {
      currentPoints.yValEnd = image.height - 1;
      chromaPoints.add(currentPoints);
      currentPoints = null;
    }
  }
  return chromaPoints;
}
which basically checks every pixel in the image to see if it's within a range of the target color (which is RGB (0, 255, 0)), then adjusts the pixel if it is. This works, but takes a really long time: about 3 seconds for a 1920 x 1080 image.
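The check itself is just a tolerance test against pure green; a simplified sketch of the idea (the helper name and threshold here are placeholders, not my exact values):
// Sketch of the per-pixel chroma test: a pixel counts as chroma when it is
// close enough to pure green (0, 255, 0). The tolerance is a placeholder.
bool isNearChromaGreen(int r, int g, int b, {int tolerance = 40}) {
  return r <= tolerance && g >= 255 - tolerance && b <= tolerance;
}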
The end result is that I want to paint the image to a canvas with a skin tone applied. I've tried a different strategy, by painting the color underneath the image, and then trying to mask out that color from the image using canvas color filters. This is 1000% faster, but doesn't quite work.
Here is the code:
renderSprite(Canvas canvasToRender, Offset offsetToRender)
{
  Paint imgPaint = new Paint();
  if (chromaToApply != null)
  {
    Paint chromaPaint = new Paint();
    chromaPaint.colorFilter = ColorFilter.mode(Color.fromRGBO(chromaToApply.redVal, chromaToApply.greenVal, chromaToApply.blueVal, 1), BlendMode.modulate);
    canvasToRender.drawImage(spriteImage, offsetToRender, chromaPaint);
    imgPaint.colorFilter = ColorFilter.mode(Color.fromRGBO(0, 255, 0, 1), BlendMode.dstOut);
  }
  if (spriteImage != null)
    canvasToRender.drawImage(spriteImage, offsetToRender, imgPaint);
}
Here is the image that is painted underneath
Here is the image that is painted on top
So I'm trying to mask out the green so the tan skin tone shows through on specific parts of the image.
I can't seem to find any combination of ColorFilter or anything else that will mask out the green color for me from the canvas. Any suggestions?
I need to convert a List<List<int>> to a dart:ui.Image. I have an algorithm to convert an integer to a color. I tried to do this by drawing one-pixel rectangles on a dart:ui.Canvas, but it is about 100 times slower than I expected! _getPixelColor is the method I use to convert an int to a Color. Here is my code:
Future<void> matrixToImage(FField sourceMatrix) async {
PictureRecorder p = PictureRecorder();
Canvas c = Canvas(
p,
Rect.fromLTWH(0, 0, sourceMatrix.length.toDouble(),
sourceMatrix[0].length.toDouble()));
Paint paint = Paint();
for (int x = 0; x < sourceMatrix.length; x++) {
for (int y = 0; y < sourceMatrix[0].length; y++) {
int pixelValue = sourceMatrix[x][y];
paint.color = _getPixelColor(pixelValue / 40 / paletteLength + paletteOffset);
c.drawRect(Rect.fromLTWH(x.toDouble(), y.toDouble(), 1, 1), paint);
}
}
Picture picture = p.endRecording();
Image result = await picture.toImage(sourceMatrix.length, sourceMatrix[0].length);
}
If you can convert your int values into pixel values that the Image class understands, you don't need the detour through canvas drawing.
Just use Image.fromBytes with the .value of each Color as the list of ints.
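Since the end goal is a dart:ui.Image anyway, another way to skip the canvas is to fill a Uint8List with RGBA bytes and hand it to ui.decodeImageFromPixels. A minimal sketch, assuming the matrix is indexed [x][y] and reusing _getPixelColor, paletteLength and paletteOffset from the question:
import 'dart:async';
import 'dart:typed_data';
import 'dart:ui' as ui;

// Sketch: build raw RGBA bytes from the matrix and decode them in one call,
// instead of drawing one rectangle per pixel.
Future<ui.Image> matrixToImageFast(List<List<int>> sourceMatrix) {
  final int width = sourceMatrix.length;
  final int height = sourceMatrix[0].length;
  final Uint8List pixels = Uint8List(width * height * 4);
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      final color = _getPixelColor(sourceMatrix[x][y] / 40 / paletteLength + paletteOffset);
      final int offset = (y * width + x) * 4;
      pixels[offset] = color.red;
      pixels[offset + 1] = color.green;
      pixels[offset + 2] = color.blue;
      pixels[offset + 3] = color.alpha;
    }
  }
  final Completer<ui.Image> completer = Completer<ui.Image>();
  ui.decodeImageFromPixels(pixels, width, height, ui.PixelFormat.rgba8888, completer.complete);
  return completer.future;
}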
That depends on how the image data is stored in the list of ints.
Generally, I like this package to manipulate images and display them:
https://pub.dev/packages/image
import 'dart:typed_data';
import 'package:image/image.dart' as im;

im.Image? img = im.decodeImage(bytes); // the image package's Image class, not Flutter's!
Uint8List uint8list = Uint8List.fromList(im.encodeJpg(img!, quality: 85));
// ...
build(BuildContext context) {
  return Container(
    child: Image.memory(uint8list), // Flutter's material Image widget!
  );
}
I'm trying to get the radius of a Circle shape after I scaled it, and I don't know how to do it. I'm developing a program (with the JavaFX library) that lets the user add shapes (circles, rectangles, squares, ...) to a pane, modify them, save the drawing and load it. I handle saving/loading by cycling through the nodes the user added (there's a button to create a Circle, Rectangle, ... and add it to the pane), taking the properties I need to recreate them (centerX, centerY, scaleX, scaleY, ...) and, when the "Save" button is pressed, writing them to a txt file. (I'm still working on loading, but my idea is to read that txt file and cycle through it to re-create the shapes on the pane in the same position, scale and rotation.)
Here's the code I wrote. This is the portion that creates a circle:
public Circle createCircle(double x, double y, double r, Color color) {
Circle circle = new Circle(x, y, r, color);
circle.setCursor(Cursor.HAND);
circle.setOnMousePressed((t) -> {
orgSceneX = t.getSceneX();
orgSceneY = t.getSceneY();
Circle c = (Circle) (t.getSource());
c.toFront();
});
circle.setOnMouseDragged((t) -> {
double offsetX = t.getSceneX() - orgSceneX;
double offsetY = t.getSceneY() - orgSceneY;
Circle c = (Circle) (t.getSource());
c.setCenterX(c.getCenterX() + offsetX);
c.setCenterY(c.getCenterY() + offsetY);
orgSceneX = t.getSceneX();
orgSceneY = t.getSceneY();
});
return circle;
}
Here's the code I use to scale it:
public void addMouseScrolling(Node node) {
node.setOnScroll((ScrollEvent event) -> {
// Adjust the zoom factor as per your requirement
double zoomFactor = 1.05;
double deltaY = event.getDeltaY();
if (deltaY < 0){
zoomFactor = 2.0 - zoomFactor;
}
node.setScaleX(node.getScaleX() * zoomFactor);
node.setScaleY(node.getScaleY() * zoomFactor);
});
}
And this is the code of the save button:
Button btn2 = new Button("Save");
btn2.setOnAction(new EventHandler<ActionEvent>() {
    public void handle(ActionEvent event) {
        int i = 0;
        for (Node node : root.getChildren()) i++;
        String stringa_salvataggio[] = new String[i];
        String stringa = "";
        i = 0;
        String scal_X_s, scal_Y_s, Lyt_X_s, Lyt_Y_s, width_s, height_s;
        for (Node node : root.getChildren()) {
            String stringa_nodo = node.toString();
            Double scal_X = node.getScaleX();
            scal_X_s = Double.toString(scal_X);
            Double scal_Y = node.getScaleY();
            scal_Y_s = Double.toString(scal_Y);
            Double Lyt_X = node.getLayoutX();
            Lyt_X_s = Double.toString(Lyt_X);
            Double Lyt_Y = node.getLayoutY();
            Lyt_Y_s = Double.toString(Lyt_Y);
            Double width = node.getLayoutBounds().getWidth();
            width_s = Double.toString(width);
            Double height = node.getLayoutBounds().getHeight();
            height_s = Double.toString(height);
            stringa = stringa + scal_X_s + scal_Y_s + "\n";
            stringa_salvataggio[i] = stringa;
            System.out.println("-------");
            i++;
            System.out.println("Nodo " + i + ":");
            System.out.println("scalX:" + scal_X);
            System.out.println("scalY:" + scal_Y);
            System.out.println("LayoutX:" + Lyt_X);
            System.out.println("LayoutY:" + Lyt_Y);
            System.out.println("Width:" + width);
            System.out.println("Height:" + height);
            System.out.println(stringa_nodo);
            //save on .txt
        }
        System.out.println(stringa_salvataggio);
    }
});
As I said before, I don't know how to get the radius of the Circle to recreate it on loading. Rectangles and squares aren't a problem; I can get all the info I need.
Thanks in advance.
Is there any way that I can detect color pages in a PDF file?
For example I have a PDF file with 5 pages, and the first and last page are in color. How can I detect the color pages? Can iText do it?
Right now my solution is to convert the PDF to images and then detect whether each image is color or black & white, but it takes too long; I need a faster way.
Convert PDF to images with Adobe Acrobat. Code as follows:
public static void ConvertPDF2Image(string pdfInputPath, string imageOutputPath,
string imageName, int startPageNum, int endPageNum, ImageFormat imageFormat, double resolution)
{
Acrobat.CAcroPDDoc pdfDoc = null;
Acrobat.CAcroPDPage pdfPage = null;
Acrobat.CAcroRect pdfRect = null;
Acrobat.CAcroPoint pdfPoint = null;
// Create the document (Can only create the AcroExch.PDDoc object using late-binding)
// Note using VisualBasic helper functions, have to add reference to DLL
pdfDoc = (Acrobat.CAcroPDDoc)Microsoft.VisualBasic.Interaction.CreateObject("AcroExch.PDDoc", "");
// validate parameter
if (!pdfDoc.Open(pdfInputPath)) { throw new FileNotFoundException(); }
if (!Directory.Exists(imageOutputPath)) { Directory.CreateDirectory(imageOutputPath); }
if (startPageNum <= 0) { startPageNum = 1; }
if (endPageNum > pdfDoc.GetNumPages() || endPageNum <= 0) { endPageNum = pdfDoc.GetNumPages(); }
if (startPageNum > endPageNum) { int tempPageNum = startPageNum; startPageNum = endPageNum; endPageNum = tempPageNum; }
if (imageFormat == null) { imageFormat = ImageFormat.Jpeg; }
if (resolution <= 0) { resolution = 1; }
// start to convert each page
for (int i = startPageNum; i <= endPageNum; i++)
{
pdfPage = (Acrobat.CAcroPDPage)pdfDoc.AcquirePage(i - 1);
pdfPoint = (Acrobat.CAcroPoint)pdfPage.GetSize();
pdfRect = (Acrobat.CAcroRect)Microsoft.VisualBasic.Interaction.CreateObject("AcroExch.Rect", "");
int imgWidth = (int)((double)pdfPoint.x * resolution);
int imgHeight = (int)((double)pdfPoint.y * resolution);
pdfRect.Left = 0;
pdfRect.right = (short)imgWidth;
pdfRect.Top = 0;
pdfRect.bottom = (short)imgHeight;
// Render to clipboard, scaled by 100 percent (ie. original size)
// Even though we want a smaller image, better for us to scale in .NET
// than Acrobat as it would greek out small text
pdfPage.CopyToClipboard(pdfRect, 0, 0, (short)(100 * resolution));
IDataObject clipboardData = Clipboard.GetDataObject();
if (clipboardData.GetDataPresent(DataFormats.Bitmap))
{
Bitmap pdfBitmap = (Bitmap)clipboardData.GetData(DataFormats.Bitmap);
pdfBitmap.Save(Path.Combine(imageOutputPath, imageName) + i.ToString() + "." + imageFormat.ToString(), imageFormat);
pdfBitmap.Dispose();
}
}
pdfDoc.Close();
Marshal.ReleaseComObject(pdfPage);
Marshal.ReleaseComObject(pdfRect);
Marshal.ReleaseComObject(pdfDoc);
Marshal.ReleaseComObject(pdfPoint);
}
// detect whether an image is color or black and white, as follows
Bitmap box1 = new Bitmap(PictureBox1.Image);
Color c;
int rr, gg, bb;
for (int i = 0; i < box1.Width; i++) {
    for (int j = 0; j < box1.Height; j++) {
        c = box1.GetPixel(i, j);
        rr = c.R; gg = c.G; bb = c.B;
        // compare ARGB values; GetPixel never returns the named Color.Black/White,
        // so a direct == against them would always be false
        if (c.ToArgb() == Color.Black.ToArgb() || c.ToArgb() == Color.White.ToArgb()) {
            MessageBox.Show("black and white dot");
        }
        else {
            if (rr == gg && gg == bb) {
                MessageBox.Show("Gray dot");
            }
            else {
                MessageBox.Show("Color dot");
            }
        }
    }
}
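For what it's worth, the per-pixel scan itself can be made much faster by reading the bitmap bytes once with LockBits instead of calling GetPixel for every pixel. A rough sketch (the 32bpp ARGB format is my assumption, not something the code above guarantees):
using System;
using System.Drawing;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;

// Sketch: read all pixels in one block with LockBits instead of GetPixel.
// Assumes a 32bpp ARGB bitmap; bytes are laid out B, G, R, A per pixel.
static bool HasColorPixels(Bitmap bmp)
{
    BitmapData data = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height),
                                   ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    try
    {
        byte[] pixels = new byte[Math.Abs(data.Stride) * bmp.Height];
        Marshal.Copy(data.Scan0, pixels, 0, pixels.Length);
        for (int y = 0; y < bmp.Height; y++)
        {
            int row = y * data.Stride;
            for (int x = 0; x < bmp.Width; x++)
            {
                byte b = pixels[row + x * 4];
                byte g = pixels[row + x * 4 + 1];
                byte r = pixels[row + x * 4 + 2];
                if (r != g || g != b)       // channels differ -> a colored pixel
                    return true;
            }
        }
        return false;
    }
    finally
    {
        bmp.UnlockBits(data);
    }
}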
I am writing a real-time motion detection program. I find that a lot of contours are created in my difference image after I use the background subtraction method. I would like to ask: is there any method that can merge these contours together, or make a larger rect that contains all the contours?
Here is what I have done so far:
http://singhgaganpreet.files.wordpress.com/2012/07/motioncolour.jpg
My code is here
#include <iostream>
#include <OpenCV/cv.h>
#include <OpenCV/highgui.h>
using namespace cv;
using namespace std;
CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;
bool first = true;
int main(int argc, char* argv[])
{
    //Create a new movie capture object.
    cam = cvCaptureFromCAM(0);

    //create storage for contours
    storage = cvCreateMemStorage(0);

    //capture current frame from webcam
    currentFrame = cvQueryFrame(cam);

    //Size of the image.
    CvSize imgSize;
    imgSize.width = currentFrame->width;
    imgSize.height = currentFrame->height;

    //Images to use in the program.
    currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);

    while(1)
    {
        currentFrame = cvQueryFrame( cam );
        if( !currentFrame ) break;

        //Convert the image to grayscale.
        cvCvtColor(currentFrame, currentFrame_grey, CV_RGB2GRAY);

        if(first) //Capturing Background for the first time
        {
            differenceImg = cvCloneImage(currentFrame_grey);
            oldFrame_grey = cvCloneImage(currentFrame_grey);
            cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
            first = false;
            continue;
        }

        //Subtract the current frame from the moving average.
        cvAbsDiff(oldFrame_grey, currentFrame_grey, differenceImg);

        //blurring the difference image
        cvSmooth(differenceImg, differenceImg, CV_BLUR);

        //apply threshold to discard small unwanted movements
        cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);

        //find contours
        cvFindContours( differenceImg, storage, &contours );

        //draw bounding box around each contour
        for(; contours != 0; contours = contours->h_next)
        {
            rect = cvBoundingRect(contours, 0); //extract bounding box for current contour

            //drawing rectangle
            cvRectangle(currentFrame,
                        cvPoint(rect.x, rect.y),
                        cvPoint(rect.x + rect.width, rect.y + rect.height),
                        cvScalar(0, 0, 255, 0),
                        2, 8, 0);
        }

        //display colour image with bounding box
        cvShowImage("Output Image", currentFrame);

        //display threshold image
        cvShowImage("Difference image", differenceImg);

        //New Background
        cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);

        //clear memory and contours
        cvClearMemStorage( storage );
        contours = 0;

        //press Esc to exit
        char c = cvWaitKey(33);
        if( c == 27 ) break;
    }

    // Destroy the image & movie objects
    cvReleaseImage(&oldFrame_grey);
    cvReleaseImage(&differenceImg);
    cvReleaseImage(&currentFrame);
    cvReleaseImage(&currentFrame_grey);
    //cvReleaseCapture(&cam);

    return 0;
}
Did you try this?
std::vector<cv::Point> points;
points.insert(points.end(), contour1.begin(), contour1.end());
points.insert(points.end(), contour2.begin(), contour2.end());

std::vector<cv::Point> contour;   // output: one hull around both contours
convexHull(cv::Mat(points), contour);
PS. For some applications, it may be better to use approxPolyDP() rather than convexHull(). Just try both.
PPS. Try smoothing the resulting contour with a Gaussian. It can also be helpful.
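For reference, a minimal sketch of the approxPolyDP variant (it treats the merged points as a closed polyline; the epsilon value is just a placeholder to tune):
// Sketch: approximate the merged point set with a polygon instead of a hull.
std::vector<cv::Point> approx;
double epsilon = 3.0;                       // max deviation in pixels, placeholder
cv::approxPolyDP(points, approx, epsilon, true /*closed*/);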
I came across a similar problem. In my case I created an empty sequence, filled it with the points of each contour, and then fitted a bounding ellipse to that sequence.
Here is my code segment...
CvMemStorage *storage  = cvCreateMemStorage();
CvMemStorage *storage1 = cvCreateMemStorage();
CvSeq *contours = 0;

//find contour in BInv
cvFindContours(BInv, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));

//creating empty sequence of CvPoint
CvSeq* seq = cvCreateSeq(CV_SEQ_ELTYPE_POINT /*| CV_SEQ_KIND_SET | CV_SEQ_FLAG_SIMPLE*/, sizeof(CvSeq), sizeof(CvPoint), storage1);

//populating seq with all contours
for(; contours != 0; contours = contours->h_next)
    for(int i = 0; i < contours->total; i++)
    {
        CvPoint* p = (CvPoint*)cvGetSeqElem(contours, i);
        cvSeqPush(seq, p);
    }

//bounding box and drawing
CvBox2D bbox = cvMinAreaRect2(seq, NULL);
cvEllipseBox(color, bbox, cvScalarAll(0), 5, 8, 0);
hope this helps.
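The same idea also works with the C++ API. A minimal sketch, assuming `binary` is the thresholded CV_8UC1 difference image and `frame` is the colour image to draw on (both names are placeholders):
// Sketch: collect the points of every contour into one vector, then fit a
// single rotated rect / ellipse around all of them.
std::vector<std::vector<cv::Point>> contours;
cv::findContours(binary, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);

std::vector<cv::Point> allPoints;
for (const auto& c : contours)
    allPoints.insert(allPoints.end(), c.begin(), c.end());

if (!allPoints.empty())
{
    cv::RotatedRect box = cv::minAreaRect(allPoints);
    cv::ellipse(frame, box, cv::Scalar(0, 0, 255), 2);
}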
If you want to merge contours on the basis of distance apart then you can do something like this:
struct hash_pair {
template <class T1, class T2>
size_t operator()(const pair<T1, T2>& p) const
{
auto hash1 = hash<T1>{}(p.first);
auto hash2 = hash<T2>{}(p.second);
if (hash1 != hash2) {
return hash1 ^ hash2;
}
return hash1;
}
};
void findPixelsNearby(unordered_map<pair<int, int>,bool,hash_pair>&res, Point px,int pxlVal) {
for (int itr1 = (px.x) - pxlVal; itr1 <= (px.x) + pxlVal; itr1++) {
for (int itr2 = (px.y - pxlVal); itr2 <= (px.y) + pxlVal; itr2++) {
res[{itr1, itr2}] = true;
}
}
}
unordered_map<pair<int, int>, bool, hash_pair> createSets(vector<Point2f>Contour, int rect) {
unordered_map<pair<int,int>,bool,hash_pair>res;
for (auto tra : Contour) {
Point px = (Point)tra;
findPixelsNearby(res,px,rect);
}
return res;
}
//void drawContour(Mat& img, vector<Point2f>s1,int px,int py,int pz) {
// for (auto x : s1) {
// line(img, x, x, Scalar(px, py, pz), 4, 0);
//
// }
// resShow("temp",img,1);
//}
bool hasCommon(unordered_map<pair<int,int>,bool,hash_pair>s1, unordered_map<pair<int, int>, bool, hash_pair>s2){
for (auto x : s1) {
if (s2.find(x.first) != s2.end()) {
return true;
}
}
return false;
}
void MergeContours(Mat image, vector<Contour>&usableContours,int distance_considered, vector<Contour>& finalContours) {
int numberContours = usableContours.size();
vector<vector<int>>ids_for_contour_merge(numberContours);
vector<unordered_map<pair<int, int>, bool, hash_pair>>ContourSets;
vector<bool>boolVals(numberContours,false);
for (int i = 0; i < numberContours; i++) {
ContourSets.push_back(createSets(usableContours[i].points, distance_considered/2));
}
for (int i = 0; i < numberContours; i++) {
if (boolVals[i] == false) {
boolVals[i] = true;
for (int j = i+1; j < numberContours; j++) {
if (boolVals[j] == false) {
if(hasCommon(ContourSets[i], ContourSets[j])==true){
ContourSets[i].insert(ContourSets[j].begin(), ContourSets[j].end());
boolVals[j] = true;
ids_for_contour_merge[i].push_back(j);
j = i;
}
}
}
}
}
vector<bool>Visited(ids_for_contour_merge.size(), false);
for (int mr = 0; mr < ids_for_contour_merge.size(); mr++) {
if (Visited[mr] == false) {
vector<Point2f>temp=usableContours[mr].points;
if (ids_for_contour_merge[mr].size() > 0) {
for (int mc = 0; mc < ids_for_contour_merge[mr].size(); mc++) {
int valPtr = ids_for_contour_merge[mr][mc];
copy(usableContours[valPtr].points.begin(), usableContours[valPtr].points.end(), std::back_inserter(temp));
Visited[valPtr] = true;
}
}
else {
Visited[mr] = true;
}
Contour newCtr;
newCtr.points = temp;
finalContours.push_back(newCtr);
}
}
///////////////////////////////////////////////////////////////DRAWING CONTOURS
/*for (auto x : finalContours) {
cout <<"CONTOURS FINAL SIZE IS : " <<x.points.size()<<endl;
int px = 0;
int py = 0;
int pz = 0;
drawContour(image, x.points, ((px+rand())%255), ((py + rand()) % 255), ((pz + rand()) % 255));
}*/
//////////////////////////////////////////////////////////////////////////////
}
More on GitHub: https://github.com/HimanshuYadav117/Merge-Contours/blob/main/MergeContours.cpp
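For reference, a rough sketch of how MergeContours could be called. The Contour struct below is my assumption of a thin wrapper around a point vector (its definition isn't shown in the snippet), and `binaryImage` / `image` are placeholders for your thresholded mask and source frame:
// Assumed shape of the Contour type used above (not part of the snippet).
struct Contour {
    std::vector<cv::Point2f> points;
};

// Sketch: wrap the output of cv::findContours and merge contours whose
// point neighbourhoods (here ~20 px) overlap.
std::vector<std::vector<cv::Point>> raw;
cv::findContours(binaryImage, raw, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

std::vector<Contour> usable;
for (const auto& c : raw) {
    Contour ctr;
    for (const auto& p : c)
        ctr.points.push_back(cv::Point2f(p));
    usable.push_back(ctr);
}

std::vector<Contour> merged;
MergeContours(image, usable, 20 /*distance_considered*/, merged);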