Merge an image and text to create another image in Swift 4

I am trying to merge a UIImage and the text of a UITextField into a new image, but I haven't had any success.
What does my app do, or what should it do?
Basically, it takes the image created by the snapshot method, merges that image with the text from a UITextField, and creates another image that is saved and shown in a tableView.
But I'm having a lot of trouble making it work.
When I take only the image, everything works well. Here is my code:
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
guard let touch = touches.first, let startPoint = startPoint else {return}
let currentPoint = touch.location(in: imageView)
let frame = rect(from: startPoint, to: currentPoint)
rectShapeLayer.removeFromSuperlayer()
if frame.size.width < 1{
tfText.resignFirstResponder()
} else {
let memedImage = getImage(frame: frame, imageView: self.imageView)
save(imageView: imageView, image: memedImage)
}
}
func getImage(frame: CGRect, imageView: UIImageView) -> UIImage {
let cropImage = imageView.snapshot(rect: frame, afterScreenUpdates: true)
return cropImage
}
But when I try to create an image using UIGraphicsBeginImageContextWithOptions to merge it with the textField, I fail.
Here is my code:
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
guard let touch = touches.first, let startPoint = startPoint else {return}
let currentPoint = touch.location(in: imageView)
let frame = rect(from: startPoint, to: currentPoint)
rectShapeLayer.removeFromSuperlayer()
if frame.size.width < 1{
tfText.resignFirstResponder()
} else {
let memedImage = getImage(frame: frame, imageView: self.imageView)
save(imageView: imageView, image: memedImage)
}
}
func getImage(frame: CGRect, imageView: UIImageView) -> UIImage {
let cropImage = imageView.snapshot(rect: frame, afterScreenUpdates: true)
UIGraphicsBeginImageContextWithOptions(cropImage.size, false, 0.0)
cropImage.draw(in: frame)
tfText.drawText(in: frame)
let newImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return newImage
}
Let me show you some screenshots of my app.
First, creating only the image.
Now, when I try to merge the text and the image.
Please look at the debug area.
The images are created, but they don't show up in the tableView.
What am I doing wrong?
UPDATE
With the code above, my memedImage was empty (thanks, Rob).
So I changed my previous getImage(_:) to:
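The likely cause, for anyone hitting the same thing: cropImage.draw(in: frame) reuses the snapshot frame, whose origin is in the imageView's coordinate space, so the drawing lands outside a context that is only cropImage.size big. Drawing relative to the context's own origin fixes the image half of the problem; a minimal sketch (note also that drawText(in:) is meant for subclass overrides rather than direct calls, which is why the update below switches to layer.render(in:)):
func getImage(frame: CGRect, imageView: UIImageView) -> UIImage {
    let cropImage = imageView.snapshot(rect: frame, afterScreenUpdates: true)
    UIGraphicsBeginImageContextWithOptions(cropImage.size, false, 0.0)
    // Draw at .zero: the new context is only cropImage.size big, so drawing
    // at frame.origin pushes the content outside its bounds.
    cropImage.draw(in: CGRect(origin: .zero, size: cropImage.size))
    let newImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return newImage
}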
func getANewImage(frame: CGRect, imageView: UIImageView, textField: UITextField) -> UIImage{
let cropImage = imageView.snapshot(rect: frame, afterScreenUpdates: true)
let newImageView = UIImageView(image: cropImage)
UIGraphicsBeginImageContextWithOptions(newImageView.frame.size, false, 0.0)
let context = UIGraphicsGetCurrentContext()!
context.translateBy(x: newImageView.frame.origin.x, y: newImageView.frame.origin.y)
newImageView.layer.render(in: context)
textField.layer.render(in: context)
let newImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return newImage
}
That way I almost got it: I created a new image that included the textField, but the textField changed its position; it should be in the center.
Using .draw(in:) didn't work, but layer.render(in:) works almost perfectly.

I had almost no help with this question, only a little hint from my friend Rob. Thank you again, Rob.
Luckily, I found out how to fix the problem with the textField position, and I'd like to share the solution.
func getImage(frame: CGRect, imageView: UIImageView, textField: UITextField) -> UIImage{
//Get the new image after snapshot method
let cropImage = imageView.snapshot(rect: frame, afterScreenUpdates: true)
//Create new imageView with the cropImage
let newImageView = UIImageView(image: cropImage)
//Origin point of the Snapshot Frame.
let frameOriginX = frame.origin.x
let frameOriginY = frame.origin.y
UIGraphicsBeginImageContextWithOptions(newImageView.frame.size, false, cropImage.scale)
let context = UIGraphicsGetCurrentContext()!
//Render the "cropImage" in the CGContext
newImageView.layer.render(in: context)
//Position of the TextField
let tf_X = textField.frame.origin.x - frameOriginX
let tf_Y = textField.frame.origin.y - frameOriginY
//Context Translate with TextField position
context.translateBy(x: tf_X, y: tf_Y)
//Render the "TextField" in the CGContext
textField.layer.render(in: context)
//Create newImage
let newImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return newImage
}
Of course this code can be optimized, but it worked very well for me.
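For reference, the same merge can also be written with UIGraphicsImageRenderer (iOS 10+), which manages the context, scale, and cleanup for you. A minimal sketch, assuming the same snapshot(rect:afterScreenUpdates:) extension used above:
func mergedImage(frame: CGRect, imageView: UIImageView, textField: UITextField) -> UIImage {
    let cropImage = imageView.snapshot(rect: frame, afterScreenUpdates: true)
    let renderer = UIGraphicsImageRenderer(size: cropImage.size)
    return renderer.image { context in
        // Draw the snapshot at the context origin.
        cropImage.draw(at: .zero)
        // Shift the context so the text field keeps its position
        // relative to the cropped area, as in the solution above.
        context.cgContext.translateBy(x: textField.frame.origin.x - frame.origin.x,
                                      y: textField.frame.origin.y - frame.origin.y)
        textField.layer.render(in: context.cgContext)
    }
}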

Related

Crop quadrilateral image from image to swiftUI [duplicate]

I would like to clip a bezier path from an image. For some reason, the image remains unclipped. Also, how do I position the path so it is cut properly?
extension UIImage {
func imageByApplyingMaskingBezierPath(_ path: UIBezierPath, _ pathFrame: CGRect) -> UIImage {
UIGraphicsBeginImageContext(self.size)
let context = UIGraphicsGetCurrentContext()!
context.saveGState()
path.addClip()
draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
let maskedImage = UIGraphicsGetImageFromCurrentImageContext()!
context.restoreGState()
UIGraphicsEndImageContext()
return maskedImage
}
}
You need to add your path.cgPath to the current context, and you also need to remove the context.saveGState() and context.restoreGState() calls.
Use this code:
func imageByApplyingMaskingBezierPath(_ path: UIBezierPath, _ pathFrame: CGRect) -> UIImage {
UIGraphicsBeginImageContext(self.size)
let context = UIGraphicsGetCurrentContext()!
context.addPath(path.cgPath)
context.clip()
draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
let maskedImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return maskedImage
}
Using it
let testPath = UIBezierPath()
testPath.move(to: CGPoint(x: self.imageView.frame.width / 2, y: self.imageView.frame.height))
testPath.addLine(to: CGPoint(x: 0, y: 0))
testPath.addLine(to: CGPoint(x: self.imageView.frame.width, y: 0))
testPath.close()
self.imageView.image = UIImage(named:"Image")?.imageByApplyingMaskingBezierPath(testPath, self.imageView.frame)
Result
You can try it like this.
var path = UIBezierPath()
var shapeLayer = CAShapeLayer()
var cropImage = UIImage()
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
if let touch = touches.first as UITouch?{
let touchPoint = touch.location(in: self.YourimageView)
print("touch begin to : \(touchPoint)")
path.move(to: touchPoint)
}
}
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
if let touch = touches.first as UITouch?{
let touchPoint = touch.location(in: self.YourimageView)
print("touch moved to : \(touchPoint)")
path.addLine(to: touchPoint)
addNewPathToImage()
}
}
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
if let touch = touches.first as UITouch?{
let touchPoint = touch.location(in: self.YourimageView)
print("touch ended at : \(touchPoint)")
path.addLine(to: touchPoint)
addNewPathToImage()
path.close()
}
}
func addNewPathToImage(){
shapeLayer.path = path.cgPath
shapeLayer.strokeColor = strokeColor.cgColor
shapeLayer.fillColor = UIColor.clear.cgColor
shapeLayer.lineWidth = lineWidth
YourimageView.layer.addSublayer(shapeLayer)
}
func cropToPath() {
UIGraphicsBeginImageContextWithOptions(YourimageView.bounds.size, false, 1)
tempImageView.layer.render(in: UIGraphicsGetCurrentContext()!)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
self.cropImage = newImage!
}
@IBAction func btnCropImage(_ sender: Any) {
cropToPath()
}
Once you have drawn a path in a particular button action, just call your imageByApplyingMaskingBezierPath.
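For instance, a hypothetical action could tie the two together like this (btnMaskImage is illustrative, and the conversion between view and image coordinates is glossed over here, as it is in the answers above):
@IBAction func btnMaskImage(_ sender: Any) {
    // Apply the freehand path collected in touchesBegan/Moved/Ended as a mask.
    guard let image = YourimageView.image else { return }
    YourimageView.image = image.imageByApplyingMaskingBezierPath(path, YourimageView.frame)
}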
Here is Swift code to get clips from an image based on a UIBezierPath; it is very quick to implement. This method works if the image is already being shown on the screen, which will most often be the case. The resulting image will have a transparent background, which is what most people want when they clip part of a photo. You can use the resulting clipped image locally, because you will have it in a UIImage object that I called imageWithTransparentBackground. This very simple code also shows you how to save the image to the camera roll, and how to put it right into the pasteboard so a user can paste it directly into a text message, into Notes, an email, etc. Note that in order to write the image to the camera roll, you need to edit Info.plist and provide a reason for "Privacy - Photo Library Usage Description".
import Photos // Needed if you save to the camera roll
Provide a UIBezierPath for clipping. Here is my declaration for one.
let clipPath = UIBezierPath()
Populate the clipPath with some logic of your own, using some combination of commands; below are a few I used in my drawing logic. Provide CGPoint equivalents for aPointOnScreen, etc. Build your path relative to the main screen, since self.view belongs to this app's view controller (for this code) and self.view.layer is rendered through the clipPath.
clipPath.move(to: aPointOnScreen)
clipPath.addLine(to: otherPointOnScreen)
clipPath.addLine(to: someOtherPointOnScreen)
clipPath.close()
This logic uses the whole of the device's screen as the context size, and a CGSize is declared for that. fullScreenX and fullScreenY are my variables, where I have already captured the device's width and height. It helps if the photo you are clipping from is already zoomed in and shown at an adequate size on the whole of the screen. What you see is what you get.
let mainScreenSize = CGSize(width: fullScreenX, height: fullScreenY)
// Get an empty context
UIGraphicsBeginImageContext(mainScreenSize)
// Specify the clip path
clipPath.addClip()
// Render through the clip path from the whole of the screen.
self.view.layer.render(in: UIGraphicsGetCurrentContext()!)
// Get the clipped image from the context
let image : UIImage = UIGraphicsGetImageFromCurrentImageContext()!
// Done with the context, so end it.
UIGraphicsEndImageContext()
// The PNG data has the alpha channel for the transparent background
let imageData = image.pngData()
// Below is the local UIImage to use within your code
let imageWithTransparentBackground = UIImage.init(data: imageData!)
// Make the image available to the pasteboard.
UIPasteboard.general.image = imageWithTransparentBackground
// Save the image to the camera roll.
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAsset(from: imageWithTransparentBackground!)
}, completionHandler: { success, error in
if success {
//
}
else if let error = error {
//
}
else {
//
}
})

Draw graphics and export with pixel precision with CoreGraphics

I saw a few questions here on Stack Overflow, but none of them solves my problem. What I want to do is subclass NSView and draw some shapes on it, then export/save the created graphics to a PNG file. While the drawing itself is quite simple, I want to be able to store the image with pixel precision; I know that drawing is done in points instead of pixels. So I override the draw() method to draw a graphic like so:
override func draw(_ dirtyRect: NSRect) {
super.draw(dirtyRect)
NSColor.white.setFill()
dirtyRect.fill()
NSColor.green.setFill()
NSColor.green.setStroke()
currentContext?.beginPath()
currentContext?.setLineWidth(1.0)
currentContext?.setStrokeColor(NSColor.green.cgColor)
currentContext?.move(to: CGPoint(x: 0, y: 0))
currentContext?.addLine(to: CGPoint(x: self.frame.width, y: self.frame.height))
currentContext?.closePath()
}
On screen it looks OK, but the saved file is not what I expected: I set the line width to 1, but in the exported file it is 2 pixels wide. To save the image, I create an NSImage from the current view:
func getImage() -> NSImage? {
let size = self.bounds.size
let imageSize = NSMakeSize(size.width, size.height)
guard let imageRepresentation = self.bitmapImageRepForCachingDisplay(in: self.bounds) else {
return nil
}
imageRepresentation.size = imageSize
self.cacheDisplay(in: self.bounds, to: imageRepresentation)
let image = NSImage(size: imageSize)
image.addRepresentation(imageRepresentation)
return image
}
and this image is then saved to a file:
do {
guard let image = self.canvasView?.getImage() else {
return
}
let imageRep = image.representations.first as? NSBitmapImageRep
let data = imageRep?.representation(using: .png, properties: [:])
try data?.write(to: url, options: .atomic)
} catch {
print(error.localizedDescription)
}
Do you have any tips on what I am doing wrong?
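One thing worth checking: on a Retina display, bitmapImageRepForCachingDisplay(in:) allocates its backing store at the screen's scale factor, so a 1-point stroke comes out 2 pixels wide in the PNG. A minimal sketch of forcing a 1:1 point-to-pixel bitmap instead (the pixel-format values below are typical RGBA defaults and an assumption to adapt):
func getPixelExactImage() -> NSImage? {
    let size = self.bounds.size
    // Request a rep whose pixel dimensions equal the view's point dimensions,
    // so one point of drawing maps to exactly one pixel in the exported file.
    guard let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                                     pixelsWide: Int(size.width),
                                     pixelsHigh: Int(size.height),
                                     bitsPerSample: 8,
                                     samplesPerPixel: 4,
                                     hasAlpha: true,
                                     isPlanar: false,
                                     colorSpaceName: .deviceRGB,
                                     bytesPerRow: 0,
                                     bitsPerPixel: 0) else { return nil }
    rep.size = size
    self.cacheDisplay(in: self.bounds, to: rep)
    let image = NSImage(size: size)
    image.addRepresentation(rep)
    return image
}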

Screenshot issue in ARKit in Swift

I have an application that uses an ARSCNView. I'm trying to take a screenshot on the tap of a button and save that image to the gallery. But when I take a screenshot, it does not show the content on the screen: I have some labels on it, but they do not show up in the image. This is my code:
@IBAction func captureImage(_ sender: Any) {
image = sceneView.snapshot()
UIImageWriteToSavedPhotosAlbum(image!, nil, nil, nil)
}
How can I show the labels and buttons on the ARSCNView in a screenshot?
snapshot() will only capture the scene itself.
To take a screenshot of the scene together with the labels and buttons, use the method below:
func snapshot(of rect: CGRect? = nil) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(self.view.bounds.size, self.view.isOpaque, 0)
self.view.drawHierarchy(in: self.view.bounds, afterScreenUpdates: true)
let fullImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
guard let image = fullImage, let rect = rect else { return fullImage }
let scale = image.scale
let scaledRect = CGRect(x: rect.origin.x * scale, y: rect.origin.y * scale, width: rect.size.width * scale, height: rect.size.height * scale)
guard let cgImage = image.cgImage?.cropping(to: scaledRect) else { return nil }
return UIImage(cgImage: cgImage, scale: scale, orientation: .up)
}
To take the screenshot:
@IBAction func takeScreenShotTapped(_ sender: Any) {
let screenShot = snapshot(of: CGRect(x: 80, y: 80, width: 100, height: 100))
}
If you want to take a screenshot of the full screen, just call snapshot() with no rect.
Hope this will help you :)
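As a side note, passing a completion selector instead of nil to UIImageWriteToSavedPhotosAlbum lets you detect save failures, such as denied photo permissions (NSPhotoLibraryAddUsageDescription must be set in Info.plist). A minimal sketch, assuming the snapshot(of:) helper above; saveScreenshotTapped is an illustrative name:
@IBAction func saveScreenshotTapped(_ sender: Any) {
    guard let shot = snapshot() else { return }
    UIImageWriteToSavedPhotosAlbum(shot, self,
        #selector(image(_:didFinishSavingWithError:contextInfo:)), nil)
}

@objc func image(_ image: UIImage,
                 didFinishSavingWithError error: Error?,
                 contextInfo: UnsafeRawPointer) {
    // A nil error means the image reached the photo library;
    // otherwise the user may have denied access.
    print(error?.localizedDescription ?? "Saved.")
}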

write text in drawRect MKPolygonRenderer

OK, I'm at my wit's end. I'm new to MapKit and am writing a health-stats app that plots data on an MKMapView relative to where patients live. I can drop annotations and draw polygons of postcode areas with no problem.
However, I'd like to write text on the map polygon (and not in an annotation).
Here's my code; I've subclassed MKPolygonRenderer to get access to its draw method.
The polygons are perfect, but there is no text. I've tried writing at the first point in the polygon (I will find the centre eventually).
I've been stuck for several nights, so I'd be really grateful for any help.
class PolygonRender: MKPolygonRenderer {
var point:CGPoint!
override init(overlay: MKOverlay) {
super.init(overlay: overlay)
}
override func drawMapRect(mapRect: MKMapRect, zoomScale: MKZoomScale, inContext context: CGContext) {
super.drawMapRect(mapRect, zoomScale: zoomScale, inContext: context)
let mapRect:MKMapRect = self.overlay.boundingMapRect
let rect:CGRect = self.rectForMapRect(mapRect)
UIGraphicsPushContext(context)
var bPath = UIBezierPath()
//let polygon:MKPolygon = self.polygon
super.fillColor = UIColor.yellowColor()
let points = self.polygon.points()
let pointCount = self.polygon.pointCount
var point:CGPoint = self.pointForMapPoint(points[0])
bPath.moveToPoint(point)
for var i = 1; i < pointCount; i++ {
point = self.pointForMapPoint(points[i])
bPath.addLineToPoint(point)
}
bPath.closePath()
bPath.addClip()
let roadWidth:CGFloat = MKRoadWidthAtZoomScale(zoomScale)
let attributes: [String: AnyObject] = [
NSForegroundColorAttributeName : UIColor.blackColor(),
NSFontAttributeName : UIFont.systemFontOfSize(5 * roadWidth)]
let centerPoint = pointForMapPoint(points[0])
print(centerPoint)
let string:NSString = "Hello world"
string.drawAtPoint(centerPoint, withAttributes: attributes)
UIGraphicsPopContext()
}
}
Generally you are doing it right, but I think the font size is too small to see the text.
Try setting the font size to a value like 100 or bigger to confirm the text is there; you will need to zoom in to see it.
Example:
public override func draw(_ mapRect: MKMapRect, zoomScale: MKZoomScale, in context: CGContext) {
let rect:CGRect = self.rect(for: mapRect)
UIGraphicsPushContext(context);
// ...
UIColor.black.setStroke()
context.setLineWidth(1)
context.stroke(rect.insetBy(dx: 0.5, dy: 0.5))
let paragraphStyle = NSMutableParagraphStyle()
paragraphStyle.alignment = .center
let attributes = [NSAttributedStringKey.paragraphStyle : paragraphStyle,
NSAttributedStringKey.font : UIFont.systemFont(ofSize: 100.0),
NSAttributedStringKey.foregroundColor : UIColor.black]
"\(rect.size)".draw(in: rect, withAttributes: attributes)
UIGraphicsPopContext();
}
The image below shows how text with a font size of 100 is displayed on the map when the rect size is 1024x1024.
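If the goal is the centre of the polygon rather than its first point, one option is the centre of the renderer's bounding rect, which is only an approximation (concave shapes may need a true centroid). A minimal sketch of the draw call, reusing the attributes dictionary from the example above:
// Draw the label centred on the overlay's bounding rect.
let boundingRect = self.rect(for: self.overlay.boundingMapRect)
let text: NSString = "Hello world"
let textSize = text.size(withAttributes: attributes)
let drawPoint = CGPoint(x: boundingRect.midX - textSize.width / 2,
                        y: boundingRect.midY - textSize.height / 2)
text.draw(at: drawPoint, withAttributes: attributes)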

UIImageView cropping

I am trying to allow users to crop an image. The problem is that the cropped image is not the same as what is shown in the view. Here are two screenshots to show what I mean.
pre-crop screen: http://i58.tinypic.com/4rap9u.png
after crop: http://i58.tinypic.com/2nrpjlj.png
Here is the code I use to create the new cropped image. The UIImageView is inside a UIScrollView to allow the user to zoom and pan. Even when the image has not been zoomed, it does not work correctly. Ideally, I would like to allow the user to move the dashed square around the screen to select a portion of the image to crop, as well as zoom in and out.
func croppedImage() -> UIImage?
{
var drawRect: CGRect = CGRectZero
var cropRect : CGRect!
cropRect = photoFrameView?.frame
zoom = imgScroll.zoomScale
println(zoom)
drawRect.size = imgPreview.bounds.size
drawRect.origin.x = round(-cropRect.origin.x * zoom)
drawRect.origin.y = round(-cropRect.origin.y * zoom)
cropRect.size.width = round(cropRect.size.width*zoom)
cropRect.size.height = round(cropRect.size.height*zoom)
cropRect.origin.x = round(cropRect.origin.x)
cropRect.origin.y = round(cropRect.origin.y)
UIGraphicsBeginImageContextWithOptions(cropRect.size, false, UIScreen.mainScreen().scale)
self.imgPreview.image?.drawInRect(drawRect)
var result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext();
return result
}
Here is where I crop the image:
let cImg = croppedImage()!
UIImageWriteToSavedPhotosAlbum(cImg, nil, nil, nil);
imgPreview.image = cImg
self.imgPreview.contentMode = .ScaleAspectFit
Also, here is the code for my UIView subclass (photoFrameView). Not sure if it is needed.
import UIKit
class mycropView: UIView {
var lastLocation:CGPoint = CGPointMake(0, 0)
override init(frame: CGRect) {
super.init(frame: frame)
// Initialization code
var panRecognizer = UIPanGestureRecognizer(target:self, action:"detectPan:")
self.gestureRecognizers = [panRecognizer]
self.addDashedBorder()
self.backgroundColor = UIColor.clearColor()
}
required init(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
func detectPan(recognizer:UIPanGestureRecognizer) {
var translation = recognizer.translationInView(self.superview!)
self.center = CGPointMake(lastLocation.x + translation.x, lastLocation.y + translation.y)
}
override func touchesBegan(touches: NSSet, withEvent event: UIEvent) {
// Promote the touched view
self.superview?.bringSubviewToFront(self)
// Remember original location
lastLocation = self.center
}
override func hitTest(point: CGPoint, withEvent e: UIEvent?) -> UIView? {
if let result = super.hitTest(point, withEvent:e) {
println("hit inside")
return result
} else {
self.removeFromSuperview()
}
return nil
}
}
extension UIView {
func addDashedBorder() {
let color = UIColor.yellowColor().CGColor
let shapeLayer:CAShapeLayer = CAShapeLayer()
let frameSize = self.frame.size
let shapeRect = CGRect(x: 0, y: 0, width: frameSize.width, height: frameSize.height)
shapeLayer.bounds = shapeRect
shapeLayer.position = CGPoint(x: frameSize.width/2, y: frameSize.height/2)
shapeLayer.fillColor = UIColor.clearColor().CGColor
shapeLayer.strokeColor = color
shapeLayer.lineWidth = 2
shapeLayer.lineJoin = kCALineJoinRound
shapeLayer.lineDashPattern = [6,3]
shapeLayer.path = UIBezierPath(roundedRect: shapeRect, cornerRadius: 5).CGPath
self.layer.addSublayer(shapeLayer)
}
}
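The thread above ends without a fix, but the mismatch in the screenshots is usually a coordinate-space problem: the dashed frame lives in its superview's coordinates while the crop has to happen in the image's pixel coordinates. A minimal sketch of the conversion in current Swift syntax, assuming imgScroll, imgPreview, and photoFrameView from the question and an image that fills the image view exactly (no aspect-fit padding):
func croppedImage() -> UIImage? {
    guard let image = imgPreview.image,
          let cgImage = image.cgImage,
          let frameView = photoFrameView else { return nil }
    // Map the dashed frame into the image view's coordinate space;
    // convert(_:to:) accounts for the scroll view's zoom transform.
    let rectInImageView = frameView.convert(frameView.bounds, to: imgPreview)
    // Scale from the image view's points to the image's pixels.
    let ppX = image.size.width * image.scale / imgPreview.bounds.width
    let ppY = image.size.height * image.scale / imgPreview.bounds.height
    let cropRect = CGRect(x: rectInImageView.origin.x * ppX,
                          y: rectInImageView.origin.y * ppY,
                          width: rectInImageView.width * ppX,
                          height: rectInImageView.height * ppY)
    guard let croppedCG = cgImage.cropping(to: cropRect) else { return nil }
    return UIImage(cgImage: croppedCG, scale: image.scale, orientation: image.imageOrientation)
}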