Detect color pages or BW pages in PDF - itext

Is there any way that I can detect color pages in a PDF file?
For example I have a PDF file with 5 pages, and the first and last page are in color. How can I detect the color pages? Can iText do it?
My current solution is to convert the PDF to images and then check whether each image is color or black-and-white, but that takes too long; I need a faster way.
I convert the PDF to images with Adobe Acrobat; the code is as follows:
public static void ConvertPDF2Image(string pdfInputPath, string imageOutputPath,
string imageName, int startPageNum, int endPageNum, ImageFormat imageFormat, double resolution)
{
Acrobat.CAcroPDDoc pdfDoc = null;
Acrobat.CAcroPDPage pdfPage = null;
Acrobat.CAcroRect pdfRect = null;
Acrobat.CAcroPoint pdfPoint = null;
// Create the document (Can only create the AcroExch.PDDoc object using late-binding)
// Note using VisualBasic helper functions, have to add reference to DLL
pdfDoc = (Acrobat.CAcroPDDoc)Microsoft.VisualBasic.Interaction.CreateObject("AcroExch.PDDoc", "");
// validate parameter
if (!pdfDoc.Open(pdfInputPath)) { throw new FileNotFoundException(); }
if (!Directory.Exists(imageOutputPath)) { Directory.CreateDirectory(imageOutputPath); }
if (startPageNum <= 0) { startPageNum = 1; }
if (endPageNum > pdfDoc.GetNumPages() || endPageNum <= 0) { endPageNum = pdfDoc.GetNumPages(); }
if (startPageNum > endPageNum) { int tempPageNum = startPageNum; startPageNum = endPageNum; endPageNum = tempPageNum; }
if (imageFormat == null) { imageFormat = ImageFormat.Jpeg; }
if (resolution <= 0) { resolution = 1; }
// start to convert each page
for (int i = startPageNum; i <= endPageNum; i++)
{
pdfPage = (Acrobat.CAcroPDPage)pdfDoc.AcquirePage(i - 1);
pdfPoint = (Acrobat.CAcroPoint)pdfPage.GetSize();
pdfRect = (Acrobat.CAcroRect)Microsoft.VisualBasic.Interaction.CreateObject("AcroExch.Rect", "");
int imgWidth = (int)((double)pdfPoint.x * resolution);
int imgHeight = (int)((double)pdfPoint.y * resolution);
pdfRect.Left = 0;
pdfRect.right = (short)imgWidth;
pdfRect.Top = 0;
pdfRect.bottom = (short)imgHeight;
// Render to clipboard, scaled by 100 percent (ie. original size)
// Even though we want a smaller image, better for us to scale in .NET
// than Acrobat as it would greek out small text
pdfPage.CopyToClipboard(pdfRect, 0, 0, (short)(100 * resolution));
IDataObject clipboardData = Clipboard.GetDataObject();
if (clipboardData.GetDataPresent(DataFormats.Bitmap))
{
Bitmap pdfBitmap = (Bitmap)clipboardData.GetData(DataFormats.Bitmap);
pdfBitmap.Save(Path.Combine(imageOutputPath, imageName) + i.ToString() + "." + imageFormat.ToString(), imageFormat);
pdfBitmap.Dispose();
}
}
pdfDoc.Close();
Marshal.ReleaseComObject(pdfPage);
Marshal.ReleaseComObject(pdfRect);
Marshal.ReleaseComObject(pdfDoc);
Marshal.ReleaseComObject(pdfPoint);
}
// Detect whether the image is color or black-and-white, as follows:
Bitmap box1 = new Bitmap(PictureBox1.Image);
Color c;
int rr, gg, bb;
for (int i = 0; i < box1.Width; i++)
{
    for (int j = 0; j < box1.Height; j++)
    {
        c = box1.GetPixel(i, j);
        rr = c.R; gg = c.G; bb = c.B;
        // Compare ARGB values: GetPixel returns unnamed colors, so "c == Color.Black" would always be false.
        if (c.ToArgb() == Color.Black.ToArgb() || c.ToArgb() == Color.White.ToArgb())
        {
            MessageBox.Show("black and white dot");
        }
        else if (rr == gg && gg == bb)
        {
            MessageBox.Show("Gray dot");
        }
        else
        {
            MessageBox.Show("Color dot");
        }
    }
}
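If rendering every page is the bottleneck, a faster (though rougher) route is to skip rasterization and let iText inspect the page resources directly. Below is a heuristic sketch using iText 5 in Java, since the question mentions iText; the class and method names are my own. It only flags pages whose image XObjects declare a non-gray color space, so pages that are "color" only because of vector content, inline images, or gray-looking RGB images would still need extra handling.
import com.itextpdf.text.pdf.PdfDictionary;
import com.itextpdf.text.pdf.PdfName;
import com.itextpdf.text.pdf.PdfObject;
import com.itextpdf.text.pdf.PdfReader;

public class ColorPageHeuristic {

    // Returns true if page i (1-based) contains an image XObject whose /ColorSpace
    // is anything other than /DeviceGray. Inline images, vector graphics drawn with
    // RGB/CMYK operators, and other gray spaces (CalGray, ICC gray) are NOT handled,
    // so treat the result as a hint, not a definitive answer.
    static boolean pageMayHaveColor(PdfReader reader, int i) {
        PdfDictionary page = reader.getPageN(i);
        PdfDictionary resources = page.getAsDict(PdfName.RESOURCES);
        if (resources == null) return false;
        PdfDictionary xobjects = resources.getAsDict(PdfName.XOBJECT);
        if (xobjects == null) return false;
        for (PdfName name : xobjects.getKeys()) {
            PdfObject obj = xobjects.getDirectObject(name);
            if (obj == null || !obj.isStream()) continue;
            PdfDictionary xobj = (PdfDictionary) obj;
            if (!PdfName.IMAGE.equals(xobj.getAsName(PdfName.SUBTYPE))) continue;
            PdfObject cs = xobj.getDirectObject(PdfName.COLORSPACE);
            // Anything that is not plain /DeviceGray is conservatively treated as "may be color".
            if (cs != null && !PdfName.DEVICEGRAY.equals(cs)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        PdfReader reader = new PdfReader(args[0]);
        for (int i = 1; i <= reader.getNumberOfPages(); i++) {
            System.out.println("page " + i + ": " + (pageMayHaveColor(reader, i) ? "color?" : "grayscale?"));
        }
        reader.close();
    }
}
Only pages flagged as "color?" would then need the slow render-and-scan pass, which is usually far fewer than all of them.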

Related

Add percent to boxes of image after prediction?

I'm working with darknet and want to draw the prediction confidence, as a percentage, next to the label on the output image, rather than the label-only output that darknet draws by default.
Is there a way to do this? After a run, darknet prints to the console the probability that the label surrounding the object is the actual label; that is what I want to add to the image itself.
I figured out how to add it. I've included the code below for anyone with the same question in the future, so it's copy-and-paste. Remember to re-make darknet afterwards.
We need to replace the draw_detections function in darknet/src/image.c with the following:
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
int i,j;
for(i = 0; i < num; ++i){
char labelstr[4096] = {0};
int class = -1;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j] > thresh){
if (class < 0) {
/* this line adds the percent to the image*/
sprintf(labelstr,"%s %f%%",names[j], (dets[i].prob[j]*100) );
class = j;
} else {
strcat(labelstr, ", ");
strcat(labelstr, names[j]);
}
printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
}
}
if(class >= 0){
int width = im.h * .006;
/*
if(0){
width = pow(prob, 1./2.)*10+1;
alphabet = 0;
}
*/
//printf("%d %s: %.0f%%\n", i, names[class], prob*100);
int offset = class*123457 % classes;
float red = get_color(2,offset,classes);
float green = get_color(1,offset,classes);
float blue = get_color(0,offset,classes);
float rgb[3];
//width = prob*20+2;
rgb[0] = red;
rgb[1] = green;
rgb[2] = blue;
box b = dets[i].bbox;
//printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
int left = (b.x-b.w/2.)*im.w;
int right = (b.x+b.w/2.)*im.w;
int top = (b.y-b.h/2.)*im.h;
int bot = (b.y+b.h/2.)*im.h;
if(left < 0) left = 0;
if(right > im.w-1) right = im.w-1;
if(top < 0) top = 0;
if(bot > im.h-1) bot = im.h-1;
draw_box_width(im, left, top, right, bot, width, red, green, blue);
if (alphabet) {
image label = get_label(alphabet, labelstr, (im.h*.03));
draw_label(im, top + width, left, label, rgb);
free_image(label);
}
if (dets[i].mask){
image mask = float_to_image(14, 14, 1, dets[i].mask);
image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
image tmask = threshold_image(resized_mask, .5);
embed_image(tmask, im, left, top);
free_image(mask);
free_image(resized_mask);
free_image(tmask);
}
}
}
}

Migrating add image to pdf from iText to PDFBox

In our project we use iText 5.x to manipulate PDF files, and we are working on migrating that implementation to PDFBox 2.x.
One scenario adds an image to a PDF page, and I have converted that code to PDFBox as well as I could. :) In the existing iText implementation, the image is added to a template using PdfTemplate, and that template is attached to an annotation using the PdfAnnotation class.
I don't know how to do that with PDFBox. Please also check whether I migrated the existing implementation properly, as I'm new to PDF libraries in Java.
Add Image to PDF(Using iText):
Document document = null;
PdfReader pdfReader = null;
pdfReader = new PdfReader(SourceFilePath());
//we retrieve the total number of pages and the page size
int total = pdfReader.getNumberOfPages();
Rectangle rectangle = pdfReader.getPageSizeWithRotation(1);
document = new Document(rectangle);
PdfImportedPage page;
PdfCopy.PageStamp stamp;
// step 2: we create a PdfCopy object that listens to the document.
PdfCopy copy = new PdfCopy(document, new FileOutputStream(DestinationFilePath(false)));
document.open();
// step 4: adding the content
for (int i = 1; i <= total; i++) {
page = copy.getImportedPage(pdfReader, i);
if(i == 1 || aBarcodeVO.getDisplacementVO().isApplyForAllPages()){
BufferedImage bufferedImage = getImage();
Image img = Image.getInstance(bufferedImage, null);
img.scaleToFit(qrImageSize, qrImageSize);
PdfName imgKey = new PdfName(aBarcodeVO.getImgUniqueId() + i);
Rectangle rectPage = pdfReader.getPageSizeWithRotation(i);
stamp = copy.createPageStamp(page);
PdfImage stream = new PdfImage(img, "", null);
stream.put(imgKey, imgKey);
PdfIndirectObject ref = copy.addToBody(stream);
int rotation = pdfReader.getPageRotation(i);
Rectangle cropBoxRect = pdfReader.getCropBox(i);
//Explicitly Apply rotation to crop box rectangle.
for (int j = 0; j < (rotation/90); j++) {
cropBoxRect = cropBoxRect.rotate();
}
//added Image in template and template in Annotation and finally annotation is added in PDF through PdfCopy.PageStamp
PdfTemplate template = PdfTemplate.createTemplate(copy, img.getPlainWidth(), img.getPlainHeight());
img.setAbsolutePosition(0, 0);
img.setRotationDegrees(rotation);
img.setDirectReference(ref.getIndirectReference());
template.addImage(img);
Rectangle rect = new Rectangle(rectLlx, rectLly, rectUrx, rectUry);
rect.setBorderWidth(0.5f);
rect.setBorderColor(new BaseColor(0xFF, 0x00, 0x00));
PdfAnnotation annotation = PdfAnnotation.createStamp(copy, rect, null, "AnnotationOnly");
annotation.setAppearance(PdfAnnotation.APPEARANCE_NORMAL, template);
annotation.setFlags(PdfAnnotation.FLAGS_PRINT + PdfAnnotation.FLAGS_LOCKED);
annotation.setRotate(rotation);
PdfName annotKey = getAnnotKey(i);
annotation.put(annotKey, annotKey);
stamp.addAnnotation(annotation);
stamp.alterContents();
}
copy.addPage(page);
}
copy.freeReader(pdfReader);
try {
if (document != null) {
document.close();
}
} catch (Exception e) {
System.out.println("Exception in handleAddBarCode() while closing():document:" + e);
}
try {
if (pdfReader != null) {
pdfReader.close();
}
} catch (Exception e) {
System.out.println("Exception in handleAddBarCode() while closing():pdfReader:" + e);
}
Add Image to PDF(Using PDFBox):
PDDocument pdDocument = PDDocument.load(new File(SourceFilePath()));
int total = pdDocument.getNumberOfPages();
PDPage page = pdDocument.getDocumentCatalog().getPages().get(0);
PDRectangle rectangle = getRotatedMediaBox(page);
PDPage pdPage = new PDPage(rectangle);
PDDocument newDocument = new PDDocument();
for (int i = 0; i < total; i++) {
pdPage = newDocument.importPage(pdDocument.getPage(i));
PDRectangle pageRect = getRotatedMediaBox(pdPage);
int rotation = pdPage.getRotation();
PDRectangle cropBoxRect = page.getCropBox();
//Calculate margin between crop box rectangle and page rectangle.
float[] margins = getCropBoxMargin(pageRect, cropBoxRect, rotation);
if (rotation == 90 || rotation == 270) {
cropBoxRect = new PDRectangle(cropBoxRect.getLowerLeftY(), cropBoxRect.getLowerLeftX(), cropBoxRect.getHeight(),
cropBoxRect.getWidth());
}
BufferedImage bufferedImage = getImage();
PDPageContentStream pageContentStream = new PDPageContentStream(newDocument, pdPage,
PDPageContentStream.AppendMode.APPEND, true);
PDImageXObject image = JPEGFactory.createFromImage(newDocument, bufferedImage);
if (rotation == 90 || rotation == 270) {
Matrix matrix = Matrix.getRotateInstance(Math.toRadians(rotation), 0, 0);
PDRectangle cropBox = pdPage.getCropBox();
float tx = (cropBox.getLowerLeftX() + cropBox.getUpperRightX()) / 2;
float ty = (cropBox.getLowerLeftY() + cropBox.getUpperRightY()) / 2;
Rectangle rectang = cropBox.transform(matrix).getBounds();
float scale = Math.min(cropBox.getWidth() / (float)rectang.getWidth(), cropBox.getHeight() / (float)rectang.getHeight());
pageContentStream.transform(Matrix.getTranslateInstance(tx, ty));
pageContentStream.transform(matrix);
pageContentStream.transform(Matrix.getScaleInstance(scale, scale));
pageContentStream.transform(Matrix.getTranslateInstance(-tx, -ty));
}
pageContentStream.drawImage(image, rectLlx, rectLly, qrImageSize, qrImageSize);
pageContentStream.close();
}
newDocument.save(new File(DestinationFilePath(false)));
newDocument.close();
pdDocument.close();
Please help me with this, or at least suggest what needs to be corrected in my PDFBox implementation.
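For the part you say you don't know how to do in PDFBox, here is a hedged sketch (PDFBox 2.x, Java) of drawing the image into an appearance stream and attaching it to a rubber-stamp annotation, which is roughly what the iText code does with PdfTemplate and PdfAnnotation.createStamp. The class and method names below are placeholders of mine, and your rotation and crop-box handling still has to be carried over.
import java.awt.image.BufferedImage;
import java.io.IOException;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.PDResources;
import org.apache.pdfbox.pdmodel.common.PDRectangle;
import org.apache.pdfbox.pdmodel.graphics.image.JPEGFactory;
import org.apache.pdfbox.pdmodel.graphics.image.PDImageXObject;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDAnnotationRubberStamp;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDAppearanceDictionary;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDAppearanceStream;

public class StampImageAnnotation {

    // Draws img into the normal appearance of a rubber-stamp annotation placed at
    // (llx, lly) with the given size, and adds the annotation to the page.
    static void addImageStamp(PDDocument doc, PDPage page, BufferedImage img,
                              float llx, float lly, float size) throws IOException {
        PDImageXObject image = JPEGFactory.createFromImage(doc, img);

        PDAppearanceStream appearance = new PDAppearanceStream(doc);
        appearance.setResources(new PDResources());
        appearance.setBBox(new PDRectangle(size, size));
        try (PDPageContentStream cs = new PDPageContentStream(doc, appearance)) {
            cs.drawImage(image, 0, 0, size, size);
        }

        PDAnnotationRubberStamp stamp = new PDAnnotationRubberStamp();
        stamp.setRectangle(new PDRectangle(llx, lly, size, size));
        stamp.setPrinted(true);   // roughly FLAGS_PRINT from the iText version
        stamp.setLocked(true);    // roughly FLAGS_LOCKED

        PDAppearanceDictionary appearanceDict = new PDAppearanceDictionary();
        appearanceDict.setNormalAppearance(appearance);
        stamp.setAppearance(appearanceDict);

        page.getAnnotations().add(stamp);
    }
}
In your loop this would be called with newDocument, pdPage, bufferedImage, rectLlx, rectLly and qrImageSize instead of drawing directly on the page content stream, so the image becomes part of an annotation rather than the page content, as in the iText version.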

Alternative to System.Drawing.Bitmap for Xamarin Forms

I need to use the Bitmap class from System.Drawing; the function below works fine on the Windows platform. After installing the System.Drawing NuGet package, the program also compiles without errors for Xamarin.Forms.
But when I run the program I get an error; it somehow seems to resolve System.Drawing from Windows rather than from the NuGet package.
What I need to do is get a photo from the camera and print it.
Below is the printing code. The problem is with the Bitmap conversion.
I tried several NuGet packages, none of which worked:
System.Drawing.Common
Fast-Bitmap
Bitmap.Net
public byte[] PrintImage(byte[] PHOTO)
{
Bitmap bmp;
using (var ms = new MemoryStream(PHOTO))
{
bmp = new Bitmap(ms);
}
BitmapData data = GetBitmapData(bmp);
BitArray dots = data.Dots;
byte[] width = BitConverter.GetBytes(data.Width);
int offset = 0;
MemoryStream stream = new MemoryStream();
BinaryWriter bw = new BinaryWriter(stream);
// center command (ESC a 1); write single bytes, since BinaryWriter.Write(int) would emit 4 bytes
bw.Write((char)0x1B);
bw.Write('a');
bw.Write((byte)1);
// print image
bw.Write((char)0x1B);
bw.Write('#');
bw.Write((char)0x1B);
bw.Write('3');
bw.Write((byte)24);
while (offset < data.Height)
{
bw.Write((char)0x1B);
bw.Write('*'); // bit-image mode
bw.Write((byte)33); // 24-dot double-density
bw.Write(width[0]); // width low byte
bw.Write(width[1]); // width high byte
for (int x = 0; x < data.Width; ++x)
{
for (int k = 0; k < 3; ++k)
{
byte slice = 0;
for (int b = 0; b < 8; ++b)
{
int y = (((offset / 8) + k) * 8) + b;
// Calculate the location of the pixel.
// It'll be at (y * width) + x.
int i = (y * data.Width) + x;
// If the image is shorter than 24 dots.
bool v = false;
if (i < dots.Length)
{
v = dots[i];
}
slice |= (byte)((v ? 1 : 0) << (7 - b));
}
bw.Write(slice);
}
}
offset += 24;
bw.Write((char)0x0A);
}
// Restore the line spacing to the default of 30 dots.
bw.Write((char)0x1B);
bw.Write('3');
bw.Write((byte)30);
bw.Flush();
byte[] bytes = stream.ToArray();
return bytes; // logo + Encoding.Default.GetString(bytes);
}
public BitmapData GetBitmapData(Bitmap bmp) // (string bmpFileName)
{
//using (var bitmap = (Bitmap)Bitmap.FromFile(bmpFileName))
using (var bitmap = bmp)
{
var threshold = 127;
var index = 0;
double multiplier = 570; // this depends on your printer
double scale = (double)(multiplier / (double)bitmap.Width);
int xheight = (int)(bitmap.Height * scale);
int xwidth = (int)(bitmap.Width * scale);
var dimensions = xwidth * xheight;
var dots = new BitArray(dimensions);
for (var y = 0; y < xheight; y++)
{
for (var x = 0; x < xwidth; x++)
{
var _x = (int)(x / scale);
var _y = (int)(y / scale);
var color = bitmap.GetPixel(_x, _y);
var luminance = (int)(color.R * 0.3 + color.G * 0.59 + color.B * 0.11);
dots[index] = (luminance < threshold);
index++;
}
}
return new BitmapData()
{
Dots = dots,
Height = (int)(bitmap.Height * scale),
Width = (int)(bitmap.Width * scale)
};
}
}
public class BitmapData
{
public BitArray Dots
{
get;
set;
}
public int Height
{
get;
set;
}
public int Width
{
get;
set;
}
}
Error occurs when function is called as:
byte[] _buffer = PrintImage(FOTO);
The error:
"Could not resolve type with token 01000119 from typeref (expected class 'System.Drawing.Bitmap' in assembly 'System.Drawing.Common, Version=4.0.1.0, Culture=neutral, PublicKeyToken=cc7b13ffcd2ddd51')"

Confused About Passing Color to Class (Java)

I'm trying to write a program in which, if the user enters the name "Randy", Java generates a random number between 0 and 255 (RGB) for the color. If the name is "Prius", the color should be green only. From there I pass that random color, or the green color, into my Tank class.
import java.awt.*;
import java.util.*;
public class Program4
{
public static void main(String[ ] args)
{
Scanner kb = new Scanner(System.in);
Random rand = new Random();
System.out.print("Please enter in your name: ");
String name = kb.nextLine();
if(name.equalsIgnoreCase ("Randy"))
{
for (int i=1 ; i<= 3; i++)
{
int color2 = rand.nextInt(255);
Color myColor = new Color (color2);
Tank myTank = new Tank(myColor, 25);
}
}
else if (name.equalsIgnoreCase ("Prius"))
{
Color myColor = new Color (0,255,0);
Tank myTank = new Tank(myColor,25);
}
//create a new instance of a Tank, get its dimension
Color myColor = new Color(255, 0, 255);
Tank myTank = new Tank(myColor, 25);
int dimension = myTank.getDimension();
//create a new instance of a Landscape
Landscape myLS = new Landscape();
//tell the landscape to add the tank to itself
myLS.addTank(myTank);
//tell the tank to turn around
myTank.turn("left");
myTank.turn("left");
//ask the landscape where is its green opening (as an int)
Point greenPoint = myLS.getGreenOpening();
int greenY = (int)greenPoint.getY();
//tell the tank to keep moving as long as it is above the green opening
while(myTank.getPositionY() + dimension < greenY)
myTank.move();
//turn left
myTank.turn("left");
//hopefully, move through the green wall
for (int i=0; i<200; i++)
myTank.move();
Point orangePoint = myLS.getOrangeOpening();
int orangeY = (int)orangePoint.getY();
if (myTank.getPositionY() + dimension < orangeY)
{
myTank.turn("right");
while (myTank.getPositionY() + dimension < orangeY)
{
myTank.move();
}
myTank.turn("left");
}
else
{
myTank.turn("left");
while (myTank.getPositionY() + dimension > orangeY)
{
myTank.move();
}
myTank.turn("right");
}
for (int i=0 ; i<200 ; i++)
myTank.move();
Point targetLocation = myLS.getTargetLocation();
int targetY = (int)targetLocation.getY();
if (myTank.getPositionY() + dimension <targetY)
{
myTank.turn("right");
while (myTank.getPositionY() + dimension < targetY + 30)
{
myTank.move();
}
myTank.turn("left");
}
else
{
myTank.turn("left");
while (myTank.getPositionY() + dimension > targetY + 30)
{
myTank.move();
}
myTank.turn("right");
}
for (int i=0 ; i<500 ; i++)
myTank.move();
}
}
There is more to the program, but I just need help with the colors. The program compiles and runs; the only problem is that the random color and the green are not being passed on to my Tank class, so the tank stays the default purple.
Thank you for the help.
Have you tried this?
Color myColor = null;   // initialize so the compiler treats them as definitely assigned
Tank myTank = null;     // (it cannot prove that the for loop below always runs)
if (name.equalsIgnoreCase("Randy"))
{
    for (int i = 1; i <= 3; i++)
    {
        int color2 = rand.nextInt(255);
        myColor = new Color(color2);
        myTank = new Tank(myColor, 25);
    }
}
else if (name.equalsIgnoreCase("Prius"))
{
    myColor = new Color(0, 255, 0);
    myTank = new Tank(myColor, 25);
}
else
{
    myColor = new Color(255, 0, 255);
    myTank = new Tank(myColor, 25);
}
int dimension = myTank.getDimension();
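One more detail worth checking, separate from the scoping fix above: new Color(color2) with a single int treats that int as a packed RGB value, so rand.nextInt(255) only ever varies the blue channel. If the intent is a fully random color, the "Randy" branch could build it per channel instead (a small sketch; rand is the existing Random and Tank is your own class):
// one random value per channel; nextInt(256) returns 0..255
myColor = new Color(rand.nextInt(256), rand.nextInt(256), rand.nextInt(256));
myTank = new Tank(myColor, 25);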

(opencv) merge contours together

I am writing a real-time motion detection program. After applying a background subtraction method, a lot of separate contours appear in my difference image. Is there any method to merge these contours together, or to make one larger rect that contains all the contours?
This is what I have so far:
http://singhgaganpreet.files.wordpress.com/2012/07/motioncolour.jpg
My code is here:
#include <iostream>
#include <OpenCV/cv.h>
#include <OpenCV/highgui.h>
using namespace cv;
using namespace std;
CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;
bool first = true;
int main(int argc, char* argv[])
{
//Create a new movie capture object.
cam = cvCaptureFromCAM(0);
//create storage for contours
storage = cvCreateMemStorage(0);
//capture current frame from webcam
currentFrame = cvQueryFrame(cam);
//Size of the image.
CvSize imgSize;
imgSize.width = currentFrame->width;
imgSize.height = currentFrame->height;
//Images to use in the program.
currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
while(1)
{
currentFrame = cvQueryFrame( cam );
if( !currentFrame ) break;
//Convert the image to grayscale.
cvCvtColor(currentFrame,currentFrame_grey,CV_RGB2GRAY);
if(first) //Capturing Background for the first time
{
differenceImg = cvCloneImage(currentFrame_grey);
oldFrame_grey = cvCloneImage(currentFrame_grey);
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
first = false;
continue;
}
//Minus the current frame from the moving average.
cvAbsDiff(oldFrame_grey,currentFrame_grey,differenceImg);
//bluring the differnece image
cvSmooth(differenceImg, differenceImg, CV_BLUR);
//apply threshold to discard small unwanted movements
cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);
//find contours
cvFindContours( differenceImg, storage, &contours );
//draw bounding box around each contour
for(; contours!=0; contours = contours->h_next)
{
rect = cvBoundingRect(contours, 0); //extract bounding box for current contour
//drawing rectangle
cvRectangle(currentFrame,
cvPoint(rect.x, rect.y),
cvPoint(rect.x+rect.width, rect.y+rect.height),
cvScalar(0, 0, 255, 0),
2, 8, 0);
}
//display colour image with bounding box
cvShowImage("Output Image", currentFrame);
//display threshold image
cvShowImage("Difference image", differenceImg);
//New Background
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
//clear memory and contours
cvClearMemStorage( storage );
contours = 0;
//press Esc to exit
char c = cvWaitKey(33);
if( c == 27 ) break;
}
// Destroy the image & movies objects
cvReleaseImage(&oldFrame_grey);
cvReleaseImage(&differenceImg);
cvReleaseImage(&currentFrame);
cvReleaseImage(&currentFrame_grey);
//cvReleaseCapture(&cam);
return 0;
}
Did you try this?
// contour1 and contour2 are two contours (std::vector<cv::Point>), e.g. from findContours()
std::vector<cv::Point> points;
points.insert(points.end(), contour1.begin(), contour1.end());
points.insert(points.end(), contour2.begin(), contour2.end());
std::vector<cv::Point> merged;
cv::convexHull(cv::Mat(points), merged);
PS. For some applications, it may be better to use approxPolyDP() rather than convexHull(). Just try both.
PPS. Smoothing the resulting contour with a Gaussian can also help.
I came across a similar problem. In my case I created an empty sequence, filled it with the points of each contour, and then fitted a bounding ellipse to that sequence.
Here is my code segment...
CvMemStorage *storage = cvCreateMemStorage ();
CvMemStorage *storage1 = cvCreateMemStorage ();
CvSeq *contours = 0;
//find contour in BInv
cvFindContours (BInv, storage, &contours, sizeof(CvContour), CV_RETR_LIST,CV_CHAIN_APPROX_NONE ,cvPoint(0,0));
//creating empty sequence of CvPoint
CvSeq* seq = cvCreateSeq(CV_SEQ_ELTYPE_POINT/*| CV_SEQ_KIND_SET | CV_SEQ_FLAG_SIMPLE*/,sizeof(CvSeq),sizeof(CvPoint),storage1);
//populating seq with all contours
for(; contours!=0; contours = contours->h_next)
for(int i=0;i<contours->total;i++)
{
CvPoint* p;
p = (CvPoint*)cvGetSeqElem (contours, i );
cvSeqPush(seq,p);
}
//bounding box and drawing
CvBox2D bbox=cvMinAreaRect2(seq, NULL );
cvEllipseBox(color,bbox,cvScalarAll(0),5,8,0);
hope this helps.
If you want to merge contours on the basis of the distance between them, you can do something like this:
struct hash_pair {
template <class T1, class T2>
size_t operator()(const pair<T1, T2>& p) const
{
auto hash1 = hash<T1>{}(p.first);
auto hash2 = hash<T2>{}(p.second);
if (hash1 != hash2) {
return hash1 ^ hash2;
}
return hash1;
}
};
void findPixelsNearby(unordered_map<pair<int, int>,bool,hash_pair>&res, Point px,int pxlVal) {
for (int itr1 = (px.x) - pxlVal; itr1 <= (px.x) + pxlVal; itr1++) {
for (int itr2 = (px.y - pxlVal); itr2 <= (px.y) + pxlVal; itr2++) {
res[{itr1, itr2}] = true;
}
}
}
unordered_map<pair<int, int>, bool, hash_pair> createSets(vector<Point2f>Contour, int rect) {
unordered_map<pair<int,int>,bool,hash_pair>res;
for (auto tra : Contour) {
Point px = (Point)tra;
findPixelsNearby(res,px,rect);
}
return res;
}
//void drawContour(Mat& img, vector<Point2f>s1,int px,int py,int pz) {
// for (auto x : s1) {
// line(img, x, x, Scalar(px, py, pz), 4, 0);
//
// }
// resShow("temp",img,1);
//}
bool hasCommon(unordered_map<pair<int,int>,bool,hash_pair>s1, unordered_map<pair<int, int>, bool, hash_pair>s2){
for (auto x : s1) {
if (s2.find(x.first) != s2.end()) {
return true;
}
}
return false;
}
void MergeContours(Mat image, vector<Contour>&usableContours,int distance_considered, vector<Contour>& finalContours) {
int numberContours = usableContours.size();
vector<vector<int>>ids_for_contour_merge(numberContours);
vector<unordered_map<pair<int, int>, bool, hash_pair>>ContourSets;
vector<bool>boolVals(numberContours,false);
for (int i = 0; i < numberContours; i++) {
ContourSets.push_back(createSets(usableContours[i].points, distance_considered/2));
}
for (int i = 0; i < numberContours; i++) {
if (boolVals[i] == false) {
boolVals[i] = true;
for (int j = i+1; j < numberContours; j++) {
if (boolVals[j] == false) {
if(hasCommon(ContourSets[i], ContourSets[j])==true){
ContourSets[i].insert(ContourSets[j].begin(), ContourSets[j].end());
boolVals[j] = true;
ids_for_contour_merge[i].push_back(j);
j = i;
}
}
}
}
}
vector<bool>Visited(ids_for_contour_merge.size(), false);
for (int mr = 0; mr < ids_for_contour_merge.size(); mr++) {
if (Visited[mr] == false) {
vector<Point2f>temp=usableContours[mr].points;
if (ids_for_contour_merge[mr].size() > 0) {
for (int mc = 0; mc < ids_for_contour_merge[mr].size(); mc++) {
int valPtr = ids_for_contour_merge[mr][mc];
copy(usableContours[valPtr].points.begin(), usableContours[valPtr].points.end(), std::back_inserter(temp));
Visited[valPtr] = true;
}
}
else {
Visited[mr] = true;
}
Contour newCtr;
newCtr.points = temp;
finalContours.push_back(newCtr);
}
}
///////////////////////////////////////////////////////////////DRAWING CONTOURS
/*for (auto x : finalContours) {
cout <<"CONTOURS FINAL SIZE IS : " <<x.points.size()<<endl;
int px = 0;
int py = 0;
int pz = 0;
drawContour(image, x.points, ((px+rand())%255), ((py + rand()) % 255), ((pz + rand()) % 255));
}*/
//////////////////////////////////////////////////////////////////////////////
}
More On Github: https://github.com/HimanshuYadav117/Merge-Contours/blob/main/MergeContours.cpp