How can I get fingerprints from an image using Flutter?

I am using the opencv package https://pub.dev/packages/opencv_4 but cannot get the fingerprint clearly from the image.
I am using this function:
Uint8List? _byte = await Cv2.morphologyEx(
  pathFrom: CVPathFrom.GALLERY_CAMERA,
  pathString: photoFinger.path,
  // NOTE: COLOR_BayerGB2RGB is a color-conversion code, not a morphological
  // operation (e.g. MORPH_OPEN / MORPH_CLOSE), which is likely why the
  // output looks wrong.
  operation: Cv2.COLOR_BayerGB2RGB,
  kernelSize: [30, 30],
);
I have Python code that uses the OpenCV library, but I don't know how to convert it to Dart using the OpenCV Dart package.
The Python code is:
import cv2
import numpy as np

# Read the input image
img = cv2.imread('input_image.jpg', cv2.IMREAD_GRAYSCALE)
# Apply Gaussian blur to remove noise
img = cv2.GaussianBlur(img, (5, 5), 0)
# Apply adaptive thresholding to segment the fingerprint
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 5)
# Apply morphological operations to remove small objects and fill in gaps
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=1)
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=1)
# Estimate the orientation field of the fingerprint
sobel_x = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=3)
sobel_y = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=3)
theta = cv2.phase(sobel_x, sobel_y)
# Quantize the orientations (computed here but not used further below)
theta_quantized = np.round(theta / (np.pi / 8)) % 8
# Thin the ridges (cv2.ximgproc.thinning takes no iteration count)
thin = cv2.ximgproc.thinning(img, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)
# Extract keypoints from the thinned fingerprint with the FAST detector
minutiae = cv2.FastFeatureDetector_create().detect(thin)
# Display the output image with minutiae points
img_with_minutiae = cv2.drawKeypoints(img, minutiae, None, color=(0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('Output Image', img_with_minutiae)
cv2.waitKey(0)
cv2.destroyAllWindows()
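As a side note on the Dart snippet above: morphologyEx expects a morphological operation, not a color-conversion code. In plain Python OpenCV terms (a minimal sketch, independent of the opencv_4 package, with a hypothetical input path), the intended call would look like:

import cv2
import numpy as np

# 'finger.jpg' is a hypothetical input path
img = cv2.imread('finger.jpg', cv2.IMREAD_GRAYSCALE)
kernel = np.ones((30, 30), np.uint8)
# the operation must be a MORPH_* constant; COLOR_BayerGB2RGB is a
# color-conversion code and is not a valid morphology operation
opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)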

Related

How to extract the white rings in the given image

I have an image of a moving robot, and I need to extract the white rings in order to find the midpoint of the robot, but thresholding is not giving the correct result.
What method should I try to extract only the white rings?
% code to get the second image
img = imread('data\Image13.jpg');
hsv = rgb2hsv(img);
bin = hsv(:,:,3) > 0.8;
Something like this?
import cv2
import numpy as np

img = cv2.imread('img.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
# find contours of the bright regions
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# filter contours by area and bounding-rectangle width
contours = [c for c in contours if (50 < cv2.contourArea(c) < 500) and cv2.boundingRect(c)[2] > 20]
# draw the surviving contours, filled, on an empty mask
out = np.zeros(thresh.shape, dtype=np.uint8)
cv2.drawContours(out, contours, -1, 255, -1)
cv2.imwrite('out.png', out)
Output:
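Since the stated goal is the robot's midpoint, here is a minimal follow-up sketch (not part of the original answer) that computes the centroid of the ring mask saved as out.png above, using image moments:

import cv2

# load the ring mask produced by the snippet above
out = cv2.imread('out.png', cv2.IMREAD_GRAYSCALE)
# moments of a binary mask give its area (m00) and centroid (m10/m00, m01/m00)
M = cv2.moments(out, binaryImage=True)
if M['m00'] > 0:
    cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']
    print('midpoint:', (cx, cy))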

Applying adaptive thresholding on Canny edge detection

I want to remove the blurred background of images in my project dataset. I already got a pretty nice solution here using Canny edge detection, and now I want to make the two hard-coded threshold values of Canny adaptive. I'd appreciate any help with this.
import glob

import cv2
import numpy as np
from PIL import Image

imageNames = glob.glob(r"C:\Users\Bikir\Pictures\rTest\*.jpg")
count = 0
for i in imageNames:
    img = np.array(Image.open(i))
    # grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # canny - I want these two values (0 and 150) to be adaptive in this case
    canned = cv2.Canny(gray, 0, 150)
    # dilate to close holes in lines
    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.dilate(canned, kernel, iterations=1)
    # find contours
    # OpenCV 3.4; if using a different major version (4.0 or 2.0), remove the first underscore
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # find the biggest contour
    biggest_cntr = None
    biggest_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > biggest_area:
            biggest_area = area
            biggest_cntr = contour
    # draw the biggest contour, filled, as a crop mask
    crop_mask = np.zeros_like(mask)
    cv2.drawContours(crop_mask, [biggest_cntr], -1, 255, -1)
    # opening + median blur to smooth jaggies
    crop_mask = cv2.erode(crop_mask, kernel, iterations=5)
    crop_mask = cv2.dilate(crop_mask, kernel, iterations=5)
    crop_mask = cv2.medianBlur(crop_mask, 21)
    # crop image
    crop = np.zeros_like(img)
    crop[crop_mask == 255] = img[crop_mask == 255]
    Image.fromarray(crop).save(r"C:\Users\Bikir\Pictures\removed\\" + str(count) + ".jpg")
    count += 1
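One common way to make Canny's two thresholds adaptive is the median-based "auto Canny" heuristic. This is a general technique, not something from the original post, and sigma=0.33 is just a conventional default:

import cv2
import numpy as np

def auto_canny(gray, sigma=0.33):
    # derive the lower/upper thresholds from the median pixel intensity
    v = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(gray, lower, upper)

In the loop above, cv2.Canny(gray, 0, 150) would then become canned = auto_canny(gray).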

Using tesseract and opencv to extract text from image

I am using pytesseract and OpenCV to recognize the text on license plates; however, a lot of the time when I run the code below, no text is output for the images I use.
import cv2
import imutils
import numpy as np
import pytesseract as tess
tess.pytesseract.tesseract_cmd =r'C:\Users\raul__000\AppData\Local\Tesseract-OCR\tesseract.exe'
# read image file
img = cv2.imread("Plate_images/plate14.jpg")
cv2.imshow("Image", img)
cv2.waitKey(0)
# RGB to Gray scale conversion
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("1 - Grayscale Conversion", gray)
cv2.waitKey(0)
# Noise removal with iterative bilateral filter(removes noise while preserving edges)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
cv2.imshow("2 - Bilateral Filter", gray)
cv2.waitKey(0)
# thresholding the grayscale image
gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cv2.imshow("3 - Thresh Filter", gray)
cv2.waitKey(0)
# Dilation adds pixels to the boundaries of objects in an image
kernel = np.ones((5,5),np.uint8)
gray = cv2.dilate(gray, kernel, iterations = 1)
cv2.imshow("4 - dilation Filter", gray)
cv2.waitKey(0)
# use tesseract to convert image to string
text = tess.image_to_string(gray, lang="eng", config='--psm 6')
print(text)
This is the image used in the code above, and nothing is output for it.
Your 4th step (dilation) is removing all the text from the image.
You should be able to see that when using cv2.imshow("4 - dilation Filter", gray).
If you remove that step and run tesseract, you should see output.
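For reference, a minimal sketch of the shortened pipeline without the dilation step, reusing the asker's paths:

import cv2
import pytesseract as tess

tess.pytesseract.tesseract_cmd = r'C:\Users\raul__000\AppData\Local\Tesseract-OCR\tesseract.exe'

img = cv2.imread("Plate_images/plate14.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# no dilation: it was thickening the white background into the dark glyphs
text = tess.image_to_string(gray, lang="eng", config='--psm 6')
print(text)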

Extract objects (fingerprint and signature) from an image using OpenCV and python

On my website I receive an image that contains the user's fingerprint and signature, and I want to extract these two pieces of information.
For example:
Original Image
import os
import cv2
import numpy as np

def imshow(label, image):
    cv2.imshow(label, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# read image
rgb_img = cv2.imread('path')
rgb_img = cv2.resize(rgb_img, (900, 600))
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
Gray Image
#canny edge detection
canny = cv2.Canny(gray_img, 50, 120)
Canny edge image
# Morphology Closing
kernel = np.ones((7, 23), np.uint8)
closing = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernel)
Morphology Closing
# Find contours
contours, hierarchy = cv2.findContours(closing.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
# Sort contours by area, then drop the largest one (the frame contour)
n = len(contours) - 1
contours = sorted(contours, key=cv2.contourArea, reverse=False)[:n]
copy = rgb_img.copy()
# Iterate through contours and draw the convex hull of each
for c in contours:
    if cv2.contourArea(c) < 750:
        continue
    hull = cv2.convexHull(c)
    cv2.drawContours(copy, [hull], 0, (0, 255, 0), 2)
imshow('Convex Hull', copy)
Image divided into parts
Now my goals are:
Know which part is the signature and which is the fingerprint
Resolve overlapping contours, if any exist
P.S.: I'm not sure the previous steps are final, so please tell me if you have better steps.
These are some hard examples I may want to deal with:
You can use morphology to select the fingerprint and the signature.
For example:
import cv2
import numpy as np

img = cv2.imread('fhZCs.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img = cv2.bitwise_not(img)  # negate image
# color definition
blue_upper = np.array([130, 255, 255])
blue_lower = np.array([115, 0, 0])
# blue color mask (sort of thresholding, actually segmentation)
mask = cv2.inRange(hsv, blue_lower, blue_upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
finger = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask2 = cv2.morphologyEx(finger, cv2.MORPH_DILATE, kernel)
signature = cv2.compare(mask2, mask, cv2.CMP_LT)
signature = cv2.morphologyEx(signature, cv2.MORPH_DILATE, kernel)
signature = cv2.bitwise_and(img, img, mask=signature)
signature = cv2.bitwise_not(signature)
finger = cv2.bitwise_and(img, img, mask=finger)
finger = cv2.bitwise_not(finger)
cv2.imwrite('finger.png', finger)
cv2.imwrite('signature.png', signature)
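If you then want to crop each part out of the original image, here is a minimal follow-up sketch (not part of the original answer); finger_mask and signature_mask are hypothetical copies of the binary masks above, taken before they are overwritten by the bitwise_and steps:

import cv2

orig = cv2.imread('fhZCs.png')
# finger_mask / signature_mask: copies of the binary masks from the answer
# above, saved before the bitwise_and steps overwrite those variables
for name, m in (('finger', finger_mask), ('signature', signature_mask)):
    pts = cv2.findNonZero(m)
    if pts is not None:
        x, y, w, h = cv2.boundingRect(pts)
        cv2.imwrite(name + '_crop.png', orig[y:y + h, x:x + w])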

Is there any way to read the digital numbers on an oven using pytesseract OCR?

I'm doing a small project using tesseract OCR.
What I want to read is the digital number generated by an oven.
I pre-process the image using OpenCV, but tesseract can't read the image correctly, e.g. it reads 194 as 794.
Let me know if there is any way to deal with this.
Thanks.
The image I want to read is shown below.
import cv2
import numpy

img_color = cv2.imread('20190509_103247.jpg', cv2.IMREAD_COLOR)
dst = img_color.copy()
roi = img_color[1600:1800, 600:1100]
dst[0:200, 0:500] = roi
blur = cv2.GaussianBlur(roi, (5, 5), 0)
gray_dst = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
ret, thr = cv2.threshold(gray_dst, 70, 255, cv2.THRESH_BINARY)
canny = cv2.Canny(roi, 100, 255)
sobel = cv2.Sobel(gray_dst, cv2.CV_8U, 1, 0, 3)
laplacian = cv2.Laplacian(gray_dst, cv2.CV_8U, ksize=3)
rev = cv2.bitwise_not(canny)
# blur = cv2.GaussianBlur(roi, (5, 5), 0)
# stencil = numpy.zeros(rev.shape).astype(rev.dtype)
# _, contours, _ = cv2.findContours(rev, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# color = [255, 255, 255]
# cv2.fillPoly(stencil, contours, color)
# result = cv2.bitwise_and(rev, stencil)
cv2.namedWindow('Show Image')
cv2.imshow('Show Image', rev)
cv2.waitKey(0)
cv2.imwrite('savedimage.jpg', rev)
cv2.destroyAllWindows()
You can create training data that matches your font / glyphs to improve how the numeric display gets transcribed into the correct digits.
References:
https://github.com/tesseract-ocr/tesseract/wiki/TrainingTesseract-4.00#creating-training-data
https://github.com/DevashishPrasad/LCD-OCR
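Before training a custom model, it can also help to constrain tesseract to a digits-only alphabet; --psm 7 and tessedit_char_whitelist are standard tesseract options, while the Otsu preprocessing and the file name below are assumptions:

import cv2
import pytesseract as tess

img = cv2.imread('savedimage.jpg', cv2.IMREAD_GRAYSCALE)
# binarize; tesseract tends to work best on dark text over a light background
_, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# treat the ROI as a single text line and restrict recognition to digits
text = tess.image_to_string(img, config='--psm 7 -c tessedit_char_whitelist=0123456789')
print(text)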