CVPixelBufferPool Error (kCVReturnInvalidArgument / -6661) - Swift

I've implemented the previous suggestions in Swift (How to use CVPixelBufferPool in conjunction with AVAssetWriterInputPixelBufferAdaptor in iPhone?),
but I got stuck with a kCVReturnInvalidArgument error (value: -6661) when using CVPixelBufferPoolCreatePixelBuffer as guided.
I'm basically trying to create a movie from images, but since the buffer pool isn't created successfully, I can't append pixel buffers. Here is my code for doing this.
Any suggestions are highly appreciated!
import Foundation
import Photos
import OpenGLES
import AVFoundation
import CoreMedia
class MovieGenerator {
    var _videoWriter: AVAssetWriter
    var _videoWriterInput: AVAssetWriterInput
    var _adapter: AVAssetWriterInputPixelBufferAdaptor
    var _buffer = UnsafeMutablePointer<Unmanaged<CVPixelBuffer>?>.alloc(1)

    init(frameSize size: CGSize, outputURL url: NSURL) {
        // delete file if exists
        let sharedManager = NSFileManager.defaultManager() as NSFileManager
        if(sharedManager.fileExistsAtPath(url.path!)) {
            sharedManager.removeItemAtPath(url.path, error: nil)
        }
        // video writer
        _videoWriter = AVAssetWriter(URL: url, fileType: AVFileTypeQuickTimeMovie, error: nil)
        // writer input
        var videoSettings = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: size.width, AVVideoHeightKey: size.height]
        _videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        _videoWriterInput.expectsMediaDataInRealTime = true
        _videoWriter.addInput(_videoWriterInput)
        // pixel buffer adapter
        var adapterAttributes = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA,
                                 kCVPixelBufferWidthKey: size.width,
                                 kCVPixelBufferHeightKey: size.height,
                                 kCVPixelFormatOpenGLESCompatibility: kCFBooleanTrue]
        _adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: _videoWriterInput, sourcePixelBufferAttributes: adapterAttributes)
        var poolCreateResult: CVReturn = CVPixelBufferPoolCreatePixelBuffer(nil, _adapter.pixelBufferPool, _buffer)
        println("pool creation:\(poolCreateResult)")
        _videoWriter.startWriting()
        _videoWriter.startSessionAtSourceTime(kCMTimeZero)
    }

    func addImage(image: UIImage, frameNum: Int, fps: Int) -> Bool {
        self.createPixelBufferFromCGImage(image.CGImage, pixelBufferPtr: _buffer)
        var presentTime: CMTime = CMTimeMake(Int64(frameNum), Int32(fps))
        var result: Bool = _adapter.appendPixelBuffer(_buffer.memory?.takeUnretainedValue(), withPresentationTime: presentTime)
        return result
    }

    func finalizeMovie(timeStamp: CMTime) {
        _videoWriterInput.markAsFinished()
        _videoWriter.endSessionAtSourceTime(timeStamp)
        _videoWriter.finishWritingWithCompletionHandler({ println("video writer finished with status: \(self._videoWriter.status)") })
    }

    func createPixelBufferFromCGImage(image: CGImage, pixelBufferPtr: UnsafeMutablePointer<Unmanaged<CVPixelBuffer>?>) {
        let width: UInt = CGImageGetWidth(image)
        let height: UInt = CGImageGetHeight(image)
        let imageData: CFData = CGDataProviderCopyData(CGImageGetDataProvider(image))
        let options: CFDictionary = [kCVPixelBufferCGImageCompatibilityKey: NSNumber.numberWithBool(true),
                                     kCVPixelBufferCGBitmapContextCompatibilityKey: NSNumber.numberWithBool(true)]
        var status: CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, width, height, OSType(kCVPixelFormatType_32BGRA), options, pixelBufferPtr)
        assert(status != 0, "CVPixelBufferCreate: \(status)")
        var lockStatus: CVReturn = CVPixelBufferLockBaseAddress(pixelBufferPtr.memory?.takeUnretainedValue(), 0)
        println("CVPixelBufferLockBaseAddress: \(lockStatus)")
        var pxData: UnsafeMutablePointer<(Void)> = CVPixelBufferGetBaseAddress(pixelBufferPtr.memory?.takeUnretainedValue())
        let bitmapinfo = CGBitmapInfo.fromRaw(CGImageAlphaInfo.NoneSkipFirst.toRaw())
        let rgbColorSpace: CGColorSpace = CGColorSpaceCreateDeviceRGB()
        var context: CGContextRef = CGBitmapContextCreate(pxData, width, height, 8, 4 * CGImageGetWidth(image), rgbColorSpace, bitmapinfo!)
        CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(width), CGFloat(height)), image)
        CVPixelBufferUnlockBaseAddress(pixelBufferPtr.memory?.takeUnretainedValue(), 0)
    }
}

I can't exactly answer your question, frustratingly, but I am working on code that does essentially the same thing. And mine happens to get further than the error you have been getting: it gets all the way to the point where it's attempting to add the images to the movie, and then simply fails by never getting a successful result from appendPixelBuffer() -- and I'm not sure how to figure out why. I'm posting this in the hope that it helps you get further, though.
(My code is adapted from AVFoundation + AssetWriter: Generate Movie With Images and Audio, and I used your post to help navigate some of the pointer interop shenanigans...)
func writeAnimationToMovie(path: String, size: CGSize, animation: Animation) -> Bool {
    var error: NSError?
    let writer = AVAssetWriter(URL: NSURL(fileURLWithPath: path), fileType: AVFileTypeQuickTimeMovie, error: &error)
    let videoSettings = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: size.width, AVVideoHeightKey: size.height]
    let input = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input, sourcePixelBufferAttributes: nil)
    input.expectsMediaDataInRealTime = true
    writer.addInput(input)
    writer.startWriting()
    writer.startSessionAtSourceTime(kCMTimeZero)
    var buffer: CVPixelBufferRef
    var frameCount = 0
    for frame in animation.frames {
        let rect = CGRectMake(0, 0, size.width, size.height)
        let rectPtr = UnsafeMutablePointer<CGRect>.alloc(1)
        rectPtr.memory = rect
        buffer = pixelBufferFromCGImage(frame.image.CGImageForProposedRect(rectPtr, context: nil, hints: nil).takeUnretainedValue(), size)
        var appendOk = false
        var j = 0
        while (!appendOk && j < 30) {
            if pixelBufferAdaptor.assetWriterInput.readyForMoreMediaData {
                let frameTime = CMTimeMake(Int64(frameCount), 10)
                appendOk = pixelBufferAdaptor.appendPixelBuffer(buffer, withPresentationTime: frameTime)
                // appendOk will always be false
                NSThread.sleepForTimeInterval(0.05)
            } else {
                NSThread.sleepForTimeInterval(0.1)
            }
            j++
        }
        if (!appendOk) {
            println("Doh, frame \(frame) at offset \(frameCount) failed to append")
        }
    }
    input.markAsFinished()
    writer.finishWritingWithCompletionHandler({
        if writer.status == AVAssetWriterStatus.Failed {
            println("oh noes, an error: \(writer.error.description)")
        } else {
            println("hrmmm, there should be a movie?")
        }
    })
    return true
}
Where pixelBufferFromCGImage is defined like so:
func pixelBufferFromCGImage(image: CGImageRef, size: CGSize) -> CVPixelBufferRef {
    let options = [
        kCVPixelBufferCGImageCompatibilityKey: true,
        kCVPixelBufferCGBitmapContextCompatibilityKey: true]
    var pixBufferPointer = UnsafeMutablePointer<Unmanaged<CVPixelBuffer>?>.alloc(1)
    let status = CVPixelBufferCreate(
        nil,
        UInt(size.width), UInt(size.height),
        OSType(kCVPixelFormatType_32ARGB),
        options,
        pixBufferPointer)
    CVPixelBufferLockBaseAddress(pixBufferPointer.memory?.takeUnretainedValue(), 0)
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapinfo = CGBitmapInfo.fromRaw(CGImageAlphaInfo.NoneSkipFirst.toRaw())
    var pixBufferData: UnsafeMutablePointer<(Void)> = CVPixelBufferGetBaseAddress(pixBufferPointer.memory?.takeUnretainedValue())
    let context = CGBitmapContextCreate(
        pixBufferData,
        UInt(size.width), UInt(size.height),
        8, UInt(4 * size.width),
        rgbColorSpace, bitmapinfo!)
    CGContextConcatCTM(context, CGAffineTransformMakeRotation(0))
    CGContextDrawImage(
        context,
        CGRectMake(0, 0, CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image))),
        image)
    CVPixelBufferUnlockBaseAddress(pixBufferPointer.memory?.takeUnretainedValue(), 0)
    return pixBufferPointer.memory!.takeUnretainedValue()
}

Per the docs for pixelBufferPool:
This property is NULL before the first call to startSessionAtTime: on the associated AVAssetWriter object.
Moving the call to CVPixelBufferPoolCreatePixelBuffer to the end of init, after the session has started, should fix the immediate problem.
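For instance, the end of your init would become something like this (a sketch using the same Swift 1.x-era calls as your code):
_videoWriter.startWriting()
_videoWriter.startSessionAtSourceTime(kCMTimeZero)
// only now does the adaptor have a live pool to vend buffers from
var poolCreateResult: CVReturn = CVPixelBufferPoolCreatePixelBuffer(nil, _adapter.pixelBufferPool, _buffer)
println("pool creation:\(poolCreateResult)")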
A few other observations:
You have your AVAssetWriterInputPixelBufferAdaptor configured for BGRA, but in createPixelBufferFromCGImage you're drawing through an RGB color space. Your final videos will look strange if the pixel formats are mismatched.
You don't need to call CVPixelBufferCreate in your createPixelBufferFromCGImage method; doing so defeats the purpose of using the buffer pool. Pull buffers from the adaptor's pool instead (see the sketch after this list).
If you're running this in a tight loop, memory consumption will become a problem. Wrapping each frame in an autoreleasepool and being careful with takeUnretainedValue vs takeRetainedValue will help.
I've posted reference implementations for Swift 1.2, 2.0, and 3.0 that use buffer pools.
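As a rough illustration of the pool-based approach, here is what appending one frame could look like in Swift 3-style syntax (the helper name appendPooledFrame is mine, not from the original code):
func appendPooledFrame(image: CGImage, adaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
    // the pool exists only after startWriting()/startSessionAtSourceTime() have been called
    guard let pool = adaptor.pixelBufferPool else { return false }
    var pixelBufferOut: CVPixelBuffer?
    guard CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pixelBufferOut) == kCVReturnSuccess,
          let pixelBuffer = pixelBufferOut else { return false }
    CVPixelBufferLockBaseAddress(pixelBuffer, [])
    // draw as little-endian BGRA to match the adaptor's kCVPixelFormatType_32BGRA attribute
    if let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer),
                               width: image.width, height: image.height,
                               bitsPerComponent: 8,
                               bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
                               space: CGColorSpaceCreateDeviceRGB(),
                               bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue) {
        context.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
    }
    CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
    return adaptor.append(pixelBuffer, withPresentationTime: presentationTime)
}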

Related

CIImage pixelBuffer always return nil

I'm applying a filter effect to my WebRTC call, following this tutorial:
https://developer.apple.com/documentation/vision/applying_matte_effects_to_people_in_images_and_video
Here is my code to convert the frame:
func capturer(_ capturer: RTCVideoCapturer, didCapture frame: RTCVideoFrame) {
    let pixelBufferr = frame.buffer as! RTCCVPixelBuffer
    let pixelBufferRef = pixelBufferr.pixelBuffer
    if #available(iOS 15.0, *) {
        DispatchQueue.global().async {
            if let output = GreetingProcessor.shared.processVideoFrame(
                foreground: pixelBufferRef,
                background: self.vbImage) {
                print("new output: \(output) => \(output.pixelBuffer) + \(self.buffer(from: output))")
                guard let px = output.pixelBuffer else { return }
                let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: px)
                let i420buffer = rtcPixelBuffer.toI420()
                let newFrame = RTCVideoFrame(buffer: i420buffer, rotation: frame.rotation, timeStampNs: frame.timeStampNs)
                self.videoSource.capturer(capturer, didCapture: newFrame)
            }
        }
    }
}
Then here is how I apply the effect:
func blendImages(
    background: CIImage,
    foreground: CIImage,
    mask: CIImage,
    isRedMask: Bool = false
) -> CIImage? {
    // scale mask
    let maskScaleX = foreground.extent.width / mask.extent.width
    let maskScaleY = foreground.extent.height / mask.extent.height
    let maskScaled = mask.transformed(by: __CGAffineTransformMake(maskScaleX, 0, 0, maskScaleY, 0, 0))
    // scale background
    let backgroundScaleX = (foreground.extent.width / background.extent.width)
    let backgroundScaleY = (foreground.extent.height / background.extent.height)
    let backgroundScaled = background.transformed(
        by: __CGAffineTransformMake(backgroundScaleX, 0, 0, backgroundScaleY, 0, 0))
    let blendFilter = isRedMask ? CIFilter.blendWithRedMask() : CIFilter.blendWithMask()
    blendFilter.inputImage = foreground
    blendFilter.backgroundImage = backgroundScaled
    blendFilter.maskImage = maskScaled
    return blendFilter.outputImage
}
The problem is that output.pixelBuffer is always nil, so I can't create an RTCVideoFrame to pass back to the delegate.
Can someone help?
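One thing to note: a CIImage's pixelBuffer property is only non-nil when the image was created directly from a pixel buffer; a filter's output is just a rendering recipe, so it has to be rendered into a buffer explicitly. A minimal sketch of that idea, using a shared CIContext (the attribute choice is illustrative):
let ciContext = CIContext() // create once and re-use for all frames

func makePixelBuffer(from image: CIImage) -> CVPixelBuffer? {
    // IOSurface backing so the buffer can be shared with the video pipeline
    let attrs = [kCVPixelBufferIOSurfacePropertiesKey as String: [:] as [String: Any]] as CFDictionary
    var pixelBuffer: CVPixelBuffer?
    let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                     Int(image.extent.width),
                                     Int(image.extent.height),
                                     kCVPixelFormatType_32BGRA,
                                     attrs,
                                     &pixelBuffer)
    guard status == kCVReturnSuccess, let buffer = pixelBuffer else { return nil }
    // render the filter's "recipe" into actual pixels
    ciContext.render(image, to: buffer)
    return buffer
}
The resulting buffer can then be wrapped in an RTCCVPixelBuffer exactly as in the capturer above.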

Extract face images from a large number of photos in Photo Library using CIDetector

I am trying to iterate over images in the Photo Library and extract faces using CIDetector. The images are required to keep their original resolutions. To do so, I take the following steps:
1- Getting assets given a date interval (usually more than a year)
func loadAssets(from fromDate: Date, to toDate: Date, completion: @escaping ([PHAsset]) -> Void) {
    fetchQueue.async {
        let authStatus = PHPhotoLibrary.authorizationStatus()
        if authStatus == .authorized || authStatus == .limited {
            let options = PHFetchOptions()
            options.predicate = NSPredicate(format: "creationDate >= %@ && creationDate <= %@", fromDate as CVarArg, toDate as CVarArg)
            options.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
            let result: PHFetchResult = PHAsset.fetchAssets(with: .image, options: options)
            var _assets = [PHAsset]()
            result.enumerateObjects { object, count, stop in
                _assets.append(object)
            }
            completion(_assets)
        } else {
            completion([])
        }
    }
}
where:
let fetchQueue = DispatchQueue.global(qos: .background)
2- Extracting faces
I then extract face images using:
func detectFaces(in image: UIImage, accuracy: String = CIDetectorAccuracyLow, completion: @escaping ([UIImage]) -> Void) {
    faceDetectionQueue.async {
        var faceImages = [UIImage]()
        let outputImageSize: CGFloat = 200.0 / image.scale
        guard let ciImage = CIImage(image: image),
              let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: accuracy]) else { completion(faceImages); return }
        let faces = faceDetector.features(in: ciImage) // Crash happens here
        let group = DispatchGroup()
        for face in faces {
            group.enter()
            if let face = face as? CIFaceFeature {
                let faceBounds = face.bounds
                let offset: CGFloat = floor(min(faceBounds.width, faceBounds.height) * 0.2)
                let inset = UIEdgeInsets(top: -offset, left: -offset, bottom: -offset, right: -offset)
                let rect = faceBounds.inset(by: inset)
                let croppedFaceImage = ciImage.cropped(to: rect)
                let scaledImage = croppedFaceImage
                    .transformed(by: CGAffineTransform(scaleX: outputImageSize / croppedFaceImage.extent.width,
                                                       y: outputImageSize / croppedFaceImage.extent.height))
                faceImages.append(UIImage(ciImage: scaledImage))
                group.leave()
            } else {
                group.leave()
            }
        }
        group.notify(queue: self.faceDetectionQueue) {
            completion(faceImages)
        }
    }
}
where
private let faceDetectionQueue = DispatchQueue(label: "face detection queue",
                                               qos: DispatchQoS.background,
                                               attributes: [],
                                               autoreleaseFrequency: DispatchQueue.AutoreleaseFrequency.workItem,
                                               target: nil)
I use the following extension to get the image from assets:
extension PHAsset {
    var image: UIImage {
        autoreleasepool {
            let manager = PHImageManager.default()
            let options = PHImageRequestOptions()
            var thumbnail = UIImage()
            let rect = CGRect(x: 0, y: 0, width: pixelWidth, height: pixelHeight)
            options.isSynchronous = true
            options.deliveryMode = .highQualityFormat
            options.resizeMode = .exact
            options.normalizedCropRect = rect
            options.isNetworkAccessAllowed = true
            manager.requestImage(for: self, targetSize: rect.size, contentMode: .aspectFit, options: options, resultHandler: { (result, info) -> Void in
                if let result = result {
                    thumbnail = result
                } else {
                    thumbnail = UIImage()
                }
            })
            return thumbnail
        }
    }
}
The code works fine for a few (usually fewer than 50) assets, but for larger numbers of images it crashes at:
let faces = faceDetector.features(in: ciImage) // Crash happens here
I get this error:
validateComputeFunctionArguments:858: failed assertion `Compute Function(ciKernelMain): missing sampler binding at index 0 for [0].'
If I reduce the size of the image fed to detectFaces(in:), e.g. to 400 px, I can analyze a few hundred images (usually fewer than 1000), but as I mentioned, using the asset's image at its original size is a requirement. My guess is that it has something to do with a memory issue when I try to extract faces with CIDetector.
Any idea what this error is about and how I can fix the issue?
I can only guess what could be the issue here, so here are a few ideas:
A CIDetector is an expensive object, so try only to create a single one and re-use it for each image.
Use a single shared CIContext for performing all Core Image operations (more below). Also, pass that to the CIDetector on init. The context manages all resources needed for rendering an image. Sharing it will allow the system to re-use as many resources as possible.
The UIImage(ciImage:) constructor is really tricky. You see, CIImages are basically just recipes for creating an image, not actual bitmaps. They store the instructions for rendering the image. It takes a CIContext to do the actual rendering. When initializing a UIImage with a CIImage, you let UIKit decide how and when to render the image, which, in my experience, caused a lot of issues for other users here on StackOverflow.
Instead, you can use the shared CIContext I mentioned above to render the image first before you make it a UIImage:
let renderedImage = self.ciContext.createCGImage(scaledImage, from: scaledImage.extent).map(UIImage.init)
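Putting those ideas together, the setup could look roughly like this (a sketch; the class and method names are mine, not an established API):
final class FaceExtractor {
    // one context and one detector for the whole run (ideas 1 and 2)
    private let ciContext = CIContext()
    private lazy var faceDetector = CIDetector(ofType: CIDetectorTypeFace,
                                               context: ciContext,
                                               options: [CIDetectorAccuracy: CIDetectorAccuracyLow])

    func faces(in ciImage: CIImage) -> [UIImage] {
        guard let detector = faceDetector else { return [] }
        return detector.features(in: ciImage).compactMap { feature in
            let cropped = ciImage.cropped(to: feature.bounds)
            // render explicitly instead of relying on UIImage(ciImage:) (idea 3)
            return ciContext.createCGImage(cropped, from: cropped.extent).map(UIImage.init)
        }
    }
}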

swift - convert UIImage to pure Black&White and detect DataMatrix

I really need help. I'm creating a DataMatrix reader. Some of the codes are on a plain white background and cause no problems with AVFoundation, but others are on a grey background with a shimmer (see image below), and this is driving me crazy.
What I've tried:
1) AVFoundation with its metadataOutput works perfectly only with the white background; I had no success with the shimmering grey one.
2) zxing - I actually can't find any working example for Swift, and their sample from GitHub finds no DataMatrix on grey either (with DataMatrix as the code type). I would be thankful for a tutorial or something similar (zxingObjC for Swift).
3) About 20 libraries from CocoaPods/GitHub - again, nothing works on the grey background.
4) Then I found that Vision perfectly detects a DataMatrix on white from a photo, so I decided to change my approach: no more catching the video output directly; instead I take UIImages, process them, and detect the DataMatrix using the Vision framework.
And to convert colors I've tried:
CIFilters (ColorControls, NoirEffect) and GPU filters (monochrome, luminance, averageLuminance, adaptiveThreshold), playing with the parameters.
In the end I have no solution that works 10 times out of 10 with my DataMatrix stickers. Sometimes it works with GPUImageAverageLuminanceThresholdFilter and GPUImageAdaptiveThresholdFilter, but only about 20% of the time.
And that 20% success rate holds only in daylight; under electric light the shimmer and glitter appear, I think.
Any advice will be helpful! Maybe there is a nice solution with zxing for Swift that I can't find. Or maybe there is no need to use Vision and I should get frames from AVFoundation, but how?
i-nigma etc. catch my stickers perfectly from live video, so there should be a way. The Android version of my scanner uses zxing, and I guess zxing does the job there.
My scanning scheme:
fileprivate func createSession(input: AVCaptureDeviceInput) -> Bool {
    let session = AVCaptureSession()
    if session.canAddInput(input) {
        session.addInput(input)
    } else {
        return false
    }
    let output = AVCaptureVideoDataOutput()
    if session.canAddOutput(output) {
        output.setSampleBufferDelegate(self, queue: DispatchQueue(label: "com.output"))
        output.videoSettings = [String(kCVPixelBufferPixelFormatTypeKey): kCVPixelFormatType_32BGRA]
        output.alwaysDiscardsLateVideoFrames = true
        session.addOutput(output)
    }
    self.videoSession = session
    return true
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func convert(cmage: CIImage) -> UIImage {
        let context: CIContext = CIContext.init(options: nil)
        let cgImage: CGImage = context.createCGImage(cmage, from: cmage.extent)!
        let image: UIImage = UIImage.init(cgImage: cgImage)
        return image
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let threshold: Double = 1.0 / 3
        let timeStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        currentTime = Double(timeStamp.value) / Double(timeStamp.timescale)
        if (currentTime - lastTime > threshold) {
            if let image = imageFromSampleBuffer(sampleBuffer: sampleBuffer),
               let cgImage = image.cgImage {
                let ciImage = CIImage(cgImage: cgImage)
                // with CIFilter
                // let blackAndWhiteImage = ciImage.applyingFilter("CIColorControls", parameters: [kCIInputContrastKey: 2.5,
                //                                                                                 kCIInputSaturationKey: 0,
                //                                                                                 kCIInputBrightnessKey: 0.5])
                // let imageToScan = convert(cmage: blackAndWhiteImage) //UIImage(ciImage: blackAndWhiteImage)
                // resImage = imageToScan
                // scanBarcode(cgImage: imageToScan.cgImage!)
                let filter = GPUImageAverageLuminanceThresholdFilter()
                filter.thresholdMultiplier = 0.7
                let imageToScan = filter.image(byFilteringImage: image)
                resImage = imageToScan!
                scanBarcode(cgImage: imageToScan!.cgImage!)
            }
        }
    }

    fileprivate func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let imgBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return nil
        }
        // Lock the base address of the pixel buffer
        CVPixelBufferLockBaseAddress(imgBuffer, CVPixelBufferLockFlags.readOnly)
        // Get the base address and the number of bytes per row of the pixel buffer
        let baseAddress = CVPixelBufferGetBaseAddress(imgBuffer)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imgBuffer)
        // Get the pixel buffer width and height
        let width = CVPixelBufferGetWidth(imgBuffer)
        let height = CVPixelBufferGetHeight(imgBuffer)
        // Create a device-dependent RGB color space
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        // Create a bitmap graphics context with the sample buffer data
        var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Little.rawValue
        bitmapInfo |= CGImageAlphaInfo.premultipliedFirst.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
        //let bitmapInfo: UInt32 = CGBitmapInfo.alphaInfoMask.rawValue
        let context = CGContext.init(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
        // Create a Quartz image from the pixel data in the bitmap graphics context
        let quartzImage = context?.makeImage()
        // Unlock the pixel buffer
        CVPixelBufferUnlockBaseAddress(imgBuffer, CVPixelBufferLockFlags.readOnly)
        if var image = quartzImage {
            if shouldInvert, let inverted = invertImage(image) {
                image = inverted
            }
            let output = UIImage(cgImage: image)
            return output
        }
        return nil
    }

    fileprivate func scanBarcode(cgImage: CGImage) {
        let barcodeRequest = VNDetectBarcodesRequest(completionHandler: { request, _ in
            self.parseResults(results: request.results)
        })
        let handler = VNImageRequestHandler(cgImage: cgImage, options: [.properties: ""])
        guard let _ = try? handler.perform([barcodeRequest]) else {
            return print("Could not scan")
        }
    }

    fileprivate func parseResults(results: [Any]?) {
        guard let results = results else {
            return print("No results")
        }
        print("GOT results - ", results.count)
        for result in results {
            if let barcode = result as? VNBarcodeObservation {
                if let code = barcode.payloadStringValue {
                    DispatchQueue.main.async {
                        self.videoSession?.stopRunning()
                        self.resultLabel.text = code
                        self.blackWhiteImageView.image = self.resImage // just to check which image the code was scanned from
                    }
                } else {
                    print("No results 2")
                }
            } else {
                print("No results 1")
            }
        }
    }
}
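Not a full answer to the thresholding problem, but one simplification worth trying: Vision can consume the camera's pixel buffer directly, so the UIImage/CGImage round-trip (and the manual bitmap context) isn't strictly needed for the scanning path. A sketch reusing the delegate and parseResults from above (the frame-rate throttling is omitted for brevity):
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    let barcodeRequest = VNDetectBarcodesRequest { request, _ in
        self.parseResults(results: request.results)
    }
    barcodeRequest.symbologies = [.DataMatrix] // restrict to DataMatrix (.dataMatrix on newer SDKs)
    let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
    try? handler.perform([barcodeRequest])
}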

How to create a MTLTexture backed by a CVPixelBuffer

What's the correct way to generate an MTLTexture backed by a CVPixelBuffer?
I have the following code, but it seems to leak:
func PixelBufferToMTLTexture(pixelBuffer: CVPixelBuffer) -> MTLTexture {
    var texture: MTLTexture!
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    let format: MTLPixelFormat = .BGRA8Unorm
    var textureRef: Unmanaged<CVMetalTextureRef>?
    let status = CVMetalTextureCacheCreateTextureFromImage(nil,
                                                           videoTextureCache!.takeUnretainedValue(),
                                                           pixelBuffer,
                                                           nil,
                                                           format,
                                                           width,
                                                           height,
                                                           0,
                                                           &textureRef)
    if (status == kCVReturnSuccess) {
        texture = CVMetalTextureGetTexture(textureRef!.takeUnretainedValue())
    }
    return texture
}
Ah, I was missing: textureRef?.release()
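For reference, in later Swift versions the Unmanaged dance (and the manual release) goes away entirely, since the texture-out parameter is an optional CVMetalTexture that ARC manages. A sketch, assuming an existing CVMetalTextureCache:
func makeTexture(from pixelBuffer: CVPixelBuffer, cache: CVMetalTextureCache) -> MTLTexture? {
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    var cvTexture: CVMetalTexture?
    // the cache vends an MTLTexture backed by the pixel buffer's memory
    let status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
                                                           cache,
                                                           pixelBuffer,
                                                           nil,
                                                           .bgra8Unorm,
                                                           width,
                                                           height,
                                                           0,
                                                           &cvTexture)
    guard status == kCVReturnSuccess, let cvTexture = cvTexture else { return nil }
    // no manual release needed; ARC owns cvTexture here
    return CVMetalTextureGetTexture(cvTexture)
}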

How to initialise CVPixelBufferRef in Swift

We used to initialize a CVPixelBufferRef like below:
CVPixelBufferRef pxbuffer = NULL;
But in Swift we cannot use NULL, so we tried the following, but of course Xcode wants us to have it initialized before use:
let pxbuffer: CVPixelBufferRef
but how?
In Obj-C we were creating the buffer like this, but as I tried to explain above, when converting to Swift I got stopped at the first line.
CVPixelBufferRef pxbuffer = NULL;
CVPixelBufferCreate(kCFAllocatorDefault, picGenislik,
                    frameHeight, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef) options,
                    &pxbuffer);
Use CVPixelBufferCreate(_:_:_:_:_:_:) to create the object.
Adding some demo code; hope this helps you. Also do read Using Legacy C APIs with Swift.
var keyCallBack = CFDictionaryKeyCallBacks()
var valueCallBacks = CFDictionaryValueCallBacks()
var empty: CFDictionaryRef = CFDictionaryCreate(kCFAllocatorDefault, nil, nil, 0, &keyCallBack, &valueCallBacks)
var attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                           1,
                                           &keyCallBack,
                                           &valueCallBacks)
var iOSurfacePropertiesKey = kCVPixelBufferIOSurfacePropertiesKey
withUnsafePointer(&iOSurfacePropertiesKey) { unsafePointer in
    CFDictionarySetValue(attributes, unsafePointer, empty)
}
var width = 10
var height = 12
var pixelBuffer: CVPixelBufferRef? = nil
var status: CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32BGRA, attributes, &pixelBuffer)
Here is another way to do it (Swift 3):
var pixelBuffer: UnsafeMutablePointer<CVPixelBuffer?>!
if pixelBuffer == nil {
    pixelBuffer = UnsafeMutablePointer<CVPixelBuffer?>.allocate(capacity: MemoryLayout<CVPixelBuffer?>.size)
}
CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32BGRA, attributes, pixelBuffer)
and then you can access the buffer object like this:
pixelBuffer.pointee
EDIT:
pixelBuffer = UnsafeMutablePointer<CVPixelBuffer?>.allocate(capacity: MemoryLayout<CVPixelBuffer?>.size)
creates a memory leak if you don't manually deallocate the pointer when you're done with it. To avoid this, change your code to the following:
var pixelBuffer : CVPixelBuffer? = nil
CVPixelBufferCreate(kCFAllocatorDefault, cgimg.width, cgimg.height, kCVPixelFormatType_32BGRA, nil, &pixelBuffer)
func getCVPixelBuffer(_ image: CGImage) -> CVPixelBuffer? {
    let imageWidth = Int(image.width)
    let imageHeight = Int(image.height)
    let attributes: [NSObject: AnyObject] = [
        kCVPixelBufferCGImageCompatibilityKey: true as AnyObject,
        kCVPixelBufferCGBitmapContextCompatibilityKey: true as AnyObject
    ]
    var pxbuffer: CVPixelBuffer? = nil
    CVPixelBufferCreate(kCFAllocatorDefault,
                        imageWidth,
                        imageHeight,
                        kCVPixelFormatType_32ARGB,
                        attributes as CFDictionary?,
                        &pxbuffer)
    if let _pxbuffer = pxbuffer {
        let flags = CVPixelBufferLockFlags(rawValue: 0)
        CVPixelBufferLockBaseAddress(_pxbuffer, flags)
        let pxdata = CVPixelBufferGetBaseAddress(_pxbuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pxdata,
                                width: imageWidth,
                                height: imageHeight,
                                bitsPerComponent: 8,
                                bytesPerRow: CVPixelBufferGetBytesPerRow(_pxbuffer),
                                space: rgbColorSpace,
                                bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
        if let _context = context {
            _context.draw(image, in: CGRect(x: 0, y: 0, width: imageWidth, height: imageHeight))
        } else {
            CVPixelBufferUnlockBaseAddress(_pxbuffer, flags)
            return nil
        }
        CVPixelBufferUnlockBaseAddress(_pxbuffer, flags)
        return _pxbuffer
    }
    return nil
}
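A quick usage sketch (the asset name "sticker" is a placeholder):
if let cgImage = UIImage(named: "sticker")?.cgImage,
   let pixelBuffer = getCVPixelBuffer(cgImage) {
    // hand the buffer to an AVAssetWriterInputPixelBufferAdaptor, Vision, Metal, etc.
    print("created \(CVPixelBufferGetWidth(pixelBuffer))x\(CVPixelBufferGetHeight(pixelBuffer)) buffer")
}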