How to fix MTKView random fill color when rendering a new image - Swift

I'm working on a photo editor, and I just switched from a UIImageView to an MTKView for performance reasons. It works great, except that on some images there is a red box for some reason; the image size changes and the red box gets bigger when a new image is displayed.
I am using Mac Catalyst for this.
This is my MTKView code:
public let colorSpace = CGColorSpaceCreateDeviceRGB()

public lazy var commandQueue: MTLCommandQueue = { [unowned self] in
    if self.device == nil {
        self.device = MTLCreateSystemDefaultDevice()
    }
    return self.device!.makeCommandQueue()!
}()

public lazy var ciContext: CIContext = { [unowned self] in
    if self.device == nil {
        self.device = MTLCreateSystemDefaultDevice()
    }
    let context = CIContext(mtlDevice: self.device!, options: [CIContextOption.highQualityDownsample: true, CIContextOption.priorityRequestLow: false])
    return context
}()

public override init(frame frameRect: CGRect, device: MTLDevice? = MTLCreateSystemDefaultDevice()) {
    super.init(frame: frameRect,
               device: device ?? MTLCreateSystemDefaultDevice())
    if super.device == nil {
        fatalError("Device doesn't support Metal")
    }
    self.framebufferOnly = false
}

required public init(coder aDecoder: NSCoder) {
    super.init(coder: aDecoder)
    self.framebufferOnly = false
    self.device = MTLCreateSystemDefaultDevice()
}
/// The image to display
public var image: CIImage? {
    didSet {
        renderImage()
    }
}

public var orientation: CGImagePropertyOrientation? {
    didSet {
        renderImage()
    }
}
func renderImage() {
    guard var image = self.image,
          let targetTexture = self.currentDrawable?.texture else {
        print("No texture/image")
        return
    }
    self.drawableSize = image.extent.size
    if let orientation = orientation {
        image = image.oriented(orientation)
    }
    let commandBuffer = self.commandQueue.makeCommandBuffer()
    let bounds = CGRect(origin: CGPoint.zero, size: self.drawableSize)
    let originX = image.extent.origin.x
    let originY = image.extent.origin.y
    let scaleX = self.drawableSize.width / image.extent.width
    let scaleY = self.drawableSize.height / image.extent.height
    let scale = min(scaleX, scaleY)
    let scaledImage = image
        .transformed(by: CGAffineTransform(translationX: -originX, y: -originY))
        .transformed(by: CGAffineTransform(scaleX: scale, y: scale))
    self.ciContext.render(scaledImage,
                          to: targetTexture,
                          commandBuffer: commandBuffer,
                          bounds: bounds,
                          colorSpace: self.colorSpace)
    commandBuffer?.present(self.currentDrawable!)
    commandBuffer?.commit()
    self.draw()
    self.releaseDrawables()
}
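One detail worth noting in the code above: renderImage() resolves currentDrawable?.texture before drawableSize is updated, so the render can target a texture that still has the previous image's dimensions, and any region the new image does not cover keeps stale or uninitialized texture memory (the view is framebufferOnly = false, and CIContext.render never clears the target). Below is a minimal reordering sketch, assuming that is the cause of the random fill; the function name is illustrative, not a confirmed fix:

func renderImageRewritten() {
    guard var image = self.image else { return }
    if let orientation = orientation {
        image = image.oriented(orientation)
    }
    // Resize the drawable *before* asking for it, so the texture we
    // render into matches the new image rather than the previous one.
    self.drawableSize = image.extent.size
    guard let drawable = self.currentDrawable,
          let commandBuffer = self.commandQueue.makeCommandBuffer() else { return }
    let bounds = CGRect(origin: .zero, size: self.drawableSize)
    let scale = min(bounds.width / image.extent.width,
                    bounds.height / image.extent.height)
    let scaledImage = image
        .transformed(by: CGAffineTransform(translationX: -image.extent.origin.x, y: -image.extent.origin.y))
        .transformed(by: CGAffineTransform(scaleX: scale, y: scale))
        // Compositing over an opaque background writes every pixel of the
        // texture, so no stale or uninitialized memory can show through.
        .composited(over: CIImage(color: .black).cropped(to: bounds))
    self.ciContext.render(scaledImage,
                          to: drawable.texture,
                          commandBuffer: commandBuffer,
                          bounds: bounds,
                          colorSpace: self.colorSpace)
    commandBuffer.present(drawable)
    commandBuffer.commit()
}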

Related

Swift capture photos with portrait effect matte

I would like to implement a camera that captures portrait photos the way Apple's default camera does. Portrait effect and depth data are enabled in the AVCapturePhotoSettings:
self.output.isPortraitEffectsMatteDeliveryEnabled = true
self.output.isDepthDataDeliveryEnabled = true
photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
photoSettings.isDepthDataDeliveryEnabled = true
photoSettings.embedsDepthDataInPhoto = true
photoSettings.embedsPortraitEffectsMatteInPhoto = true
Printing AVCapturePhoto.portraitEffectsMatte returns Optional(L008 2080x1170 v.1.1), but the portrait effect is visible neither in my preview layer nor in the saved image.
Additional context
AVCaptureDevice uses the builtInDualWideCamera
Printing output.portraitEffect also logs true.
Full code:
class ViewController: UIViewController {
    var session: AVCaptureSession?
    var output = AVCapturePhotoOutput()
    var previewLayer = AVCaptureVideoPreviewLayer()

    private func setUpCamera() {
        let session = AVCaptureSession()
        if let device = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: (useFrontCamera ? AVCaptureDevice.Position.front : AVCaptureDevice.Position.back)) {
            do {
                let input = try AVCaptureDeviceInput(device: device)
                if session.canAddInput(input) {
                    session.addInput(input)
                }
                if session.canAddOutput(output) {
                    session.addOutput(output)
                }
                previewLayer.videoGravity = .resizeAspectFill
                previewLayer.session = session
                session.startRunning()
                self.session = session
            }
            catch {
                print(error)
            }
        }
    }

    private func getSettings() -> AVCapturePhotoSettings {
        let photoSettings = AVCapturePhotoSettings()
        if self.output.isPortraitEffectsMatteDeliverySupported && self.output.isDepthDataDeliverySupported {
            self.output.isPortraitEffectsMatteDeliveryEnabled = true
            self.output.isDepthDataDeliveryEnabled = true
            photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
            photoSettings.isDepthDataDeliveryEnabled = true
            photoSettings.embedsDepthDataInPhoto = true
            photoSettings.embedsPortraitEffectsMatteInPhoto = true
        }
        return photoSettings
    }

    private func takePhoto() {
        self.output.capturePhoto(with: self.getSettings(), delegate: self)
    }
}

extension ViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let data = photo.fileDataRepresentation() else { return }
        let image = UIImage(data: data)
        let imageView = UIImageView(image: image)
        session?.stopRunning()
        imageView.contentMode = .scaleAspectFill
        imageView.frame = CGRect(x: 0, y: 0, width: view.frame.width/4, height: view.frame.height/4)
        imageView.layer.name = "photoPreview"
        view.addSubview(imageView)
        UIImageWriteToSavedPhotosAlbum(image!, self, nil, nil)
    }
}
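For what it's worth, the matte is delivered only as auxiliary data; AVFoundation never applies it to the photo or the preview for you. Below is a minimal sketch of blending it yourself with Core Image, assuming the goal is to approximate the system portrait look; the function name and blur radius are illustrative:

import AVFoundation
import CoreImage
import CoreImage.CIFilterBuiltins

// Sketch: blur the background and keep the foreground sharp, using the
// delivered portrait effects matte as the blend mask.
func portraitRendered(from photo: AVCapturePhoto) -> CIImage? {
    guard let matte = photo.portraitEffectsMatte,
          let data = photo.fileDataRepresentation(),
          let base = CIImage(data: data) else { return nil }
    // The matte is lower resolution than the photo, so scale it to match.
    var mask = CIImage(cvPixelBuffer: matte.mattingImage)
    mask = mask.transformed(by: CGAffineTransform(
        scaleX: base.extent.width / mask.extent.width,
        y: base.extent.height / mask.extent.height))
    let blend = CIFilter.blendWithMask()
    blend.inputImage = base                       // sharp foreground
    blend.backgroundImage = base.applyingGaussianBlur(sigma: 20)
        .cropped(to: base.extent)                 // blurred background
    blend.maskImage = mask                        // white keeps the foreground
    return blend.outputImage
}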

UIView to UIImage code is not working. It causes a runtime error

I have to convert UIView to UIImage because I want to make a custom marker.
Currently, I'm making an app using Naver Map SDK.
Here is my code:
let marker = NMFMarker()
marker.position = NMGLatLng(lat: lat, lng: lng)
let windowView = CustomInfoWindowView()
windowView.nameLabel.text = name
windowView.jobLabel.text = job
windowView.scoreLabel.text = "\(score)"
marker.iconImage = NMFOverlayImage(image: windowView.asImage())
marker.mapView = mapView
NMFOverlayImage only supports the UIImage type.
Here is my full code:
class MapViewController: UIViewController {
    let viewModel = MapViewModel()
    let locations = BehaviorRelay<Void>(value: ())
    let disposedBag = DisposeBag()
    private let imageCell = UIImageView()

    override func viewDidLoad() {
        super.viewDidLoad()
        let mapView = NMFMapView(frame: view.frame)
        view.addSubview(mapView)
        bind(mapView: mapView)
    }

    private func bind(mapView: NMFMapView) {
        let input = MapViewModel.Input(getLocations: locations.asSignal(onErrorJustReturn: ()))
        let output = viewModel.transform(input)
        output.getLocations.asObservable().subscribe(onNext: { [unowned self] res in
            if !res.isEmpty {
                for i in 0..<res.count {
                    addMarker(
                        lat: res[i].address[1],
                        lng: res[i].address[0],
                        mapView: mapView,
                        name: res[i].name,
                        job: res[i].field,
                        score: res[i].level
                    )
                }
            }
        }).disposed(by: disposedBag)
    }

    private func addMarker(lat: Double, lng: Double, mapView: NMFMapView, name: String, job: String, score: Double) {
        let marker = NMFMarker()
        marker.position = NMGLatLng(lat: lat, lng: lng)
        let windowView = CustomInfoWindowView()
        windowView.nameLabel.text = name
        windowView.jobLabel.text = job
        windowView.scoreLabel.text = "\(score)"
        marker.iconImage = NMFOverlayImage(image: windowView.asImage())
        marker.mapView = mapView
    }
}
And this is my custom view code:
import UIKit
import SnapKit
import Then

class CustomInfoWindowView: UIView {
    let customView = UIView().then {
        $0.backgroundColor = .clear
    }
    let windowView = UIView().then {
        $0.backgroundColor = R.color.mainColor()
        $0.layer.cornerRadius = 10
    }
    let marker = UIImageView().then {
        $0.image = R.image.marker()
    }
    let nameLabel = UILabel().then {
        $0.font = .boldSystemFont(ofSize: 16)
        $0.textColor = .white
    }
    let jobLabel = UILabel().then {
        $0.font = .systemFont(ofSize: 12, weight: .medium)
        $0.textColor = R.color.underLine()!
    }
    let starLogo = UIImageView().then {
        $0.image = UIImage(systemName: "star.fill")
        $0.tintColor = .systemYellow
    }
    let scoreLabel = UILabel().then {
        $0.font = .boldSystemFont(ofSize: 14)
        $0.textColor = .white
    }

    override init(frame: CGRect) {
        super.init(frame: frame)
        setUpSubViews()
    }

    required init?(coder: NSCoder) {
        super.init(coder: coder)
    }

    private func setUpSubViews() {
        self.addSubview(customView)
        [windowView, marker].forEach({ customView.addSubview($0) })
        [nameLabel, jobLabel, starLogo, scoreLabel].forEach({ self.addSubview($0) })
        customView.snp.makeConstraints {
            $0.width.equalTo(108)
            $0.height.equalTo(100)
        }
        windowView.snp.makeConstraints {
            $0.width.equalToSuperview()
            $0.height.equalTo(64)
            $0.top.equalTo(0)
        }
        marker.snp.makeConstraints {
            $0.width.equalTo(20)
            $0.height.equalTo(27)
            $0.top.equalTo(windowView.snp.bottom).offset(10)
            $0.centerX.equalTo(windowView.snp.centerX)
        }
        nameLabel.snp.makeConstraints {
            $0.top.leading.equalTo(10)
            $0.width.equalTo(10)
        }
        jobLabel.snp.makeConstraints {
            $0.centerY.equalTo(nameLabel.snp.centerY)
            $0.leading.equalTo(nameLabel.snp.trailing).offset(5)
        }
        starLogo.snp.makeConstraints {
            $0.top.equalTo(nameLabel.snp.bottom).offset(5)
            $0.leading.equalTo(nameLabel.snp.leading)
            $0.width.height.equalTo(15)
        }
        scoreLabel.snp.makeConstraints {
            $0.centerY.equalTo(starLogo.snp.centerY)
            $0.leading.equalTo(starLogo.snp.trailing).offset(3)
        }
    }
}
And this is my UIView extension code:
extension UIView {
    func asImage() -> UIImage {
        let renderer = UIGraphicsImageRenderer(bounds: bounds)
        return renderer.image { context in
            layer.render(in: context.cgContext)
        }
    }
}
I use SnapKit, Then, RxSwift, RxCocoa, and NMapsMap.
Please help me. When I run .asImage(), I get a runtime error.
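A minimal sketch of one plausible cause, assuming the crash comes from snapshotting a view that never got a size: CustomInfoWindowView() starts with a zero frame, and rendering zero-sized bounds through UIGraphicsImageRenderer can fail at runtime. Giving the view a concrete frame and forcing a layout pass before asImage() is worth trying; the 108×100 size below mirrors the snp constraints above and is otherwise illustrative:

let windowView = CustomInfoWindowView()
windowView.nameLabel.text = name
windowView.jobLabel.text = job
windowView.scoreLabel.text = "\(score)"

// Size the view and run a layout pass so there is something to render.
windowView.frame = CGRect(x: 0, y: 0, width: 108, height: 100)
windowView.setNeedsLayout()
windowView.layoutIfNeeded()

marker.iconImage = NMFOverlayImage(image: windowView.asImage())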

Crash on creating an image from UIGraphicsImageRenderer

I'm getting a Thread 1: EXC_BAD_ACCESS (code=EXC_I386_GPFLT) when layer.render(in: context) is called, while trying to create a snapshot image from a custom window:
let renderer = UIGraphicsImageRenderer(bounds: bounds, format: .init(for: traitCollection))
return renderer.image { action in
    let context = action.cgContext
    layer.render(in: context)
}
You can use the extension below to get it:
import UIKit

private var rendererKey: UInt8 = 0

extension UIView {
    var renderer: UIGraphicsImageRenderer! {
        get {
            guard let rendererInstance = objc_getAssociatedObject(self, &rendererKey) as? UIGraphicsImageRenderer else {
                self.renderer = UIGraphicsImageRenderer(bounds: bounds)
                return self.renderer
            }
            return rendererInstance
        }
        set(newValue) {
            objc_setAssociatedObject(self, &rendererKey, newValue, objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
        }
    }

    func snapImageView() -> UIImageView {
        // layer.render(in:) must run synchronously, on the main thread,
        // inside the image closure; dispatching it asynchronously would
        // return before anything is drawn and produce an empty image.
        let img: UIImage = renderer.image { ctx in
            layer.render(in: ctx.cgContext)
        }
        let imageView = UIImageView(image: img)
        imageView.frame = renderer.format.bounds
        imageView.clipsToBounds = true
        return imageView
    }
}
// Generate image and image view of any view instance
let anImageView = yourView.snapImageView()
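One caveat with caching the renderer in an associated object: it captures the view's bounds at first access, so if the view is resized later the snapshot keeps the stale size. Recreating the renderer whenever bounds change is the safer design.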

Taking a square photo with Camera App

I am currently building a camera app and want the camera to take a square image (375x375, like Instagram) and save it like that.
I am able to square off the viewfinder of the camera, but it is not taking the picture the right way, and when I save it, it saves in full view. I looked at the other Q&As on here, but none of them seem to work with my code.
Can someone please help me figure this out.
import Foundation
import UIKit
import AVFoundation

class CameraViewController: UIViewController {
    var captureSession = AVCaptureSession()
    var frontCameraDeviceInput: AVCaptureDeviceInput?
    var backCameraDeviceInput: AVCaptureDeviceInput?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
    var image: UIImage?

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }

    func setupDevice() {
        let frontCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
        let backCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
        frontCameraDeviceInput = try? AVCaptureDeviceInput(device: frontCamera!)
        backCameraDeviceInput = try? AVCaptureDeviceInput(device: backCamera!)
    }

    func setupInputOutput() {
        captureSession.addInput(backCameraDeviceInput!)
        photoOutput = AVCapturePhotoOutput()
        photoOutput?.isHighResolutionCaptureEnabled = true
        photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
        captureSession.addOutput(photoOutput!)
    }

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer?.frame = self.view.frame
        self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }

    @IBAction func camerButton(_ sender: Any) {
        let settings = AVCapturePhotoSettings()
        photoOutput?.capturePhoto(with: settings, delegate: self)
    }

    @IBAction func switchCamera(_ sender: Any) {
        captureSession.beginConfiguration()
        // Change camera device inputs from back to front or the opposite
        if captureSession.inputs.contains(frontCameraDeviceInput!) == true {
            captureSession.removeInput(frontCameraDeviceInput!)
            captureSession.addInput(backCameraDeviceInput!)
        } else if captureSession.inputs.contains(backCameraDeviceInput!) == true {
            captureSession.removeInput(backCameraDeviceInput!)
            captureSession.addInput(frontCameraDeviceInput!)
        }
        // Commit all the configuration changes at once
        captureSession.commitConfiguration()
    }

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "previewCameraPhoto" {
            let previewVC = segue.destination as! PreviewViewController
            previewVC.image = self.image
        }
    }
}

extension CameraViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            image = UIImage(data: imageData)
            performSegue(withIdentifier: "previewCameraPhoto", sender: nil)
        }
    }

    override var prefersStatusBarHidden: Bool {
        return true
    }
}
The lines of code below are used to capture the image. I execute them when the capture button is tapped; in your case that is
func camerButton(_ sender: Any)
The definitions of the methods used are also below.
DispatchQueue.global(qos: .default).async {
    let videoConnection = self.imageOutput.connection(with: AVMediaType.video)
    let orientation: UIDeviceOrientation = UIDevice.current.orientation
    switch orientation {
    case .portrait:
        videoConnection?.videoOrientation = .portrait
    case .portraitUpsideDown:
        videoConnection?.videoOrientation = .portraitUpsideDown
    case .landscapeRight:
        videoConnection?.videoOrientation = .landscapeLeft
    case .landscapeLeft:
        videoConnection?.videoOrientation = .landscapeRight
    default:
        videoConnection?.videoOrientation = .portrait
    }
    self.imageOutput.captureStillImageAsynchronously(from: videoConnection!) { buffer, _ in
        self.session.stopRunning()
        guard let b = buffer else { return }
        let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(b)
        if var image = UIImage(data: data!) {
            // Crop the image if the output needs to be square.
            if self.configuration.onlySquareImagesFromCamera {
                image = self.cropImageToSquare(image)
            }
            // Flip the image if it was taken from the front camera.
            if let device = self.device, device.position == .front {
                image = self.flipImage(image: image)
            }
            DispatchQueue.main.async {
                self.didCapturePhoto?(image)
            }
        }
    }
}
The two methods used in this function:
func cropImageToSquare(_ image: UIImage) -> UIImage {
    let orientation: UIDeviceOrientation = UIDevice.current.orientation
    var imageWidth = image.size.width
    var imageHeight = image.size.height
    switch orientation {
    case .landscapeLeft, .landscapeRight:
        // Swap width and height if orientation is landscape
        imageWidth = image.size.height
        imageHeight = image.size.width
    default:
        break
    }
    // The center coordinate along the Y axis
    let rcy = imageHeight * 0.5
    let rect = CGRect(x: rcy - imageWidth * 0.5, y: 0, width: imageWidth, height: imageWidth)
    let imageRef = image.cgImage?.cropping(to: rect)
    return UIImage(cgImage: imageRef!, scale: 1.0, orientation: image.imageOrientation)
}

// Used when the image is taken from the front camera.
func flipImage(image: UIImage!) -> UIImage! {
    let imageSize: CGSize = image.size
    UIGraphicsBeginImageContextWithOptions(imageSize, true, 1.0)
    let ctx = UIGraphicsGetCurrentContext()!
    ctx.rotate(by: CGFloat(Double.pi/2.0))
    ctx.translateBy(x: 0, y: -imageSize.width)
    ctx.scaleBy(x: imageSize.height/imageSize.width, y: imageSize.width/imageSize.height)
    ctx.draw(image.cgImage!, in: CGRect(x: 0.0,
                                        y: 0.0,
                                        width: imageSize.width,
                                        height: imageSize.height))
    let newImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return newImage
}
I should not forget to give credit to the developers of this library - https://github.com/Yummypets/YPImagePicker/blob/2.5.1/Source/Camera/YPCameraVC.swift
Just add this to the image picker and the user will get the option of choosing their preferred crop ratio; the default will be what you wanted: a square photo.
self.ImagePicker.allowsEditing = true
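Since the answer above relies on AVCaptureStillImageOutput, which has been deprecated since iOS 10, here is a minimal sketch of the same center-square crop applied with the AVCapturePhotoOutput delegate the question already uses; the helper name is illustrative:

extension CameraViewController {
    // Sketch: crop the captured photo to a centered square, e.g. before
    // assigning it to `image` in photoOutput(_:didFinishProcessingPhoto:error:).
    func squareImage(from photo: AVCapturePhoto) -> UIImage? {
        guard let data = photo.fileDataRepresentation(),
              let source = UIImage(data: data),
              let cgImage = source.cgImage else { return nil }
        // Work in pixel coordinates of the underlying CGImage.
        let width = CGFloat(cgImage.width)
        let height = CGFloat(cgImage.height)
        let side = min(width, height)
        let rect = CGRect(x: (width - side) / 2,
                          y: (height - side) / 2,
                          width: side, height: side)
        guard let cropped = cgImage.cropping(to: rect) else { return nil }
        // Preserve scale and orientation so the crop displays upright.
        return UIImage(cgImage: cropped, scale: source.scale,
                       orientation: source.imageOrientation)
    }
}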

CABasicAnimation creates empty default value copy of CALayer

I have a custom CALayer that draws radial gradients. It works great except during animation. It seems that each iteration of CABasicAnimation creates a new copy of the CALayer subclass with empty, default values for the properties:
In the screenshot above, you see that CABasicAnimation has created a new copy of the layer and is updating gradientOrigin but none of the other properties have come along for the ride.
This has the result of not rendering anything during the animation. Here's a GIF:
Here's what it should look like:
Here's the animation code:
let animation = CABasicAnimation(keyPath: "gradientOrigin")
animation.duration = 2
animation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionEaseInEaseOut)
let newOrigin: CGPoint = CGPoint(x: 0, y: triangle.bounds.height/2)
animation.fromValue = NSValue(CGPoint: triangle.gradientLayer.gradientOrigin)
animation.toValue = NSValue(CGPoint: newOrigin)
triangle.gradientLayer.gradientOrigin = newOrigin
triangle.gradientLayer.addAnimation(animation, forKey: nil)
Here's the custom CALayer code:
enum RadialGradientLayerProperties: String {
    case gradientOrigin
    case gradientRadius
    case colors
    case locations
}

class RadialGradientLayer: CALayer {
    var gradientOrigin = CGPoint() {
        didSet { setNeedsDisplay() }
    }
    var gradientRadius = CGFloat() {
        didSet { setNeedsDisplay() }
    }
    var colors = [CGColor]() {
        didSet { setNeedsDisplay() }
    }
    var locations = [CGFloat]() {
        didSet { setNeedsDisplay() }
    }

    override init() {
        super.init()
        needsDisplayOnBoundsChange = true
    }

    required init(coder aDecoder: NSCoder) {
        super.init()
    }

    override init(layer: AnyObject) {
        super.init(layer: layer)
    }

    override class func needsDisplayForKey(key: String) -> Bool {
        if key == RadialGradientLayerProperties.gradientOrigin.rawValue ||
           key == RadialGradientLayerProperties.gradientRadius.rawValue ||
           key == RadialGradientLayerProperties.colors.rawValue ||
           key == RadialGradientLayerProperties.locations.rawValue {
            print("called \(key)")
            return true
        }
        return super.needsDisplayForKey(key)
    }

    override func actionForKey(event: String) -> CAAction? {
        if event == RadialGradientLayerProperties.gradientOrigin.rawValue ||
           event == RadialGradientLayerProperties.gradientRadius.rawValue ||
           event == RadialGradientLayerProperties.colors.rawValue ||
           event == RadialGradientLayerProperties.locations.rawValue {
            let animation = CABasicAnimation(keyPath: event)
            animation.fromValue = self.presentationLayer()?.valueForKey(event)
            return animation
        }
        return super.actionForKey(event)
    }

    override func drawInContext(ctx: CGContext) {
        guard let colorRef = self.colors.first else { return }
        let numberOfComponents = CGColorGetNumberOfComponents(colorRef)
        let colorSpace = CGColorGetColorSpace(colorRef)
        let deepGradientComponents: [[CGFloat]] = (self.colors.map {
            let colorComponents = CGColorGetComponents($0)
            let buffer = UnsafeBufferPointer(start: colorComponents, count: numberOfComponents)
            return Array(buffer) as [CGFloat]
        })
        let flattenedGradientComponents = deepGradientComponents.flatMap({ $0 })
        let gradient = CGGradientCreateWithColorComponents(colorSpace, flattenedGradientComponents, self.locations, self.locations.count)
        CGContextDrawRadialGradient(ctx, gradient, self.gradientOrigin, 0, self.gradientOrigin, self.gradientRadius, .DrawsAfterEndLocation)
    }
}
Figured out the answer!
In init(layer:) you have to copy the property values to your class manually. Here's how that looks in action:
override init(layer: AnyObject) {
    if let layer = layer as? RadialGradientLayer {
        gradientOrigin = layer.gradientOrigin
        gradientRadius = layer.gradientRadius
        colors = layer.colors
        locations = layer.locations
    }
    super.init(layer: layer)
}
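For readers on current Swift: this question predates Swift 3, where the designated initializer's signature changed to init(layer: Any), but the same manual copy applies. A minimal sketch of the same fix in modern Swift:

override init(layer: Any) {
    // Core Animation passes the original layer here when it makes its
    // render-time copies; copy the custom properties across manually.
    if let layer = layer as? RadialGradientLayer {
        gradientOrigin = layer.gradientOrigin
        gradientRadius = layer.gradientRadius
        colors = layer.colors
        locations = layer.locations
    }
    super.init(layer: layer)
}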