import Foundation
import AudioToolbox
class AudioPlay {
//setting buffer num
static let knumberBuffers = 3
var aqData = AQPlayerState.init()
//A custom structure for a playback audio queue
class AQPlayerState {
var mDataFormat = AudioStreamBasicDescription()
var mQueue:AudioQueueRef?
var mBuffers = [AudioQueueBufferRef?].init(repeating: nil, count: AudioPlay.knumberBuffers)
var mAudioFile:AudioFileID?
var bufferByteSize = UInt32()
var mCurrentPacket:Int64?
var mNumPacketsToRead = UInt32()
var mPacketDescs:UnsafeMutablePointer<AudioStreamPacketDescription>?
var mIsRunning = false
}
//playbackAudioQueue callback
static let HandleOutputBuffer:AudioQueueOutputCallback = { (aqData1, inAQ, inBuffer) in
var pAqData = (aqData1?.assumingMemoryBound(to: AQPlayerState.self).pointee)!
guard pAqData.mIsRunning || pAqData.mQueue != nil else{
print("audioplay is not running exit callback func")
return
}
var numBytesReadFromFile = UInt32()
var numPackets = pAqData.mNumPacketsToRead
AudioFileReadPacketData(pAqData.mAudioFile!, false, &numBytesReadFromFile, pAqData.mPacketDescs, pAqData.mCurrentPacket!, &numPackets, inBuffer.pointee.mAudioData)
if numPackets > 0 {
inBuffer.pointee.mAudioDataByteSize = numBytesReadFromFile
AudioQueueEnqueueBuffer(pAqData.mQueue!, inBuffer, ((pAqData.mPacketDescs != nil) ? numPackets : UInt32(0)), pAqData.mPacketDescs)
pAqData.mCurrentPacket! += Int64(numPackets)
}else{
AudioQueueStop(pAqData.mQueue!, false)
pAqData.mIsRunning = false
}
}
//call func to set the property
//create new outputqueue
//start the audioqueue
func start() {
let url = Bundle.main.url(forResource: "123", withExtension: "mp3")!
let audioFileURL = url as CFURL
print(audioFileURL)
let result = AudioFileOpenURL(audioFileURL, .readPermission, 0, &aqData.mAudioFile)
print(result)
var dataFormatSize = UInt32(MemoryLayout.size(ofValue: aqData.mDataFormat))
let result1 = AudioFileGetProperty(aqData.mAudioFile!, kAudioFilePropertyDataFormat,&dataFormatSize, &aqData.mDataFormat)
//get file property
var maxPacketSize = UInt32()
var propertySize = UInt32(MemoryLayout.size(ofValue: maxPacketSize))
let result2 = AudioFileGetProperty(aqData.mAudioFile!, kAudioFilePropertyPacketSizeUpperBound, &propertySize, &maxPacketSize)
//calculate and setting buffer size
DeriveBufferSize(ASBDesc: aqData.mDataFormat, maxPacketSize: maxPacketSize, seconds: 0.5, outBufferSize: &aqData.bufferByteSize, outNumPacketsToRead: &aqData.mNumPacketsToRead)
//check the format is VBR or CBR
let isFormatVBR = aqData.mDataFormat.mBytesPerPacket == 0 || aqData.mDataFormat.mFramesPerPacket == 0
if isFormatVBR {
aqData.mPacketDescs = UnsafeMutablePointer<AudioStreamPacketDescription>.allocate(capacity: MemoryLayout.size(ofValue: AudioStreamPacketDescription()))
}else{
aqData.mPacketDescs = nil
}
//create new audio queue
let result4 = AudioQueueNewOutput(&aqData.mDataFormat,AudioPlay.HandleOutputBuffer, &aqData,CFRunLoopGetCurrent(),CFRunLoopMode.commonModes.rawValue, 0, &aqData.mQueue)
//queue start
aqData.mIsRunning = true
//alloc memory buffer
aqData.mCurrentPacket = 0
for i in 0..<AudioPlay.knumberBuffers {
AudioQueueAllocateBuffer(aqData.mQueue!, aqData.bufferByteSize,&aqData.mBuffers[i])
AudioPlay.HandleOutputBuffer(&aqData,aqData.mQueue!, (aqData.mBuffers[i])!)
}
//start audioqueue
AudioQueueStart(aqData.mQueue!, nil)
repeat{
CFRunLoopRunInMode(CFRunLoopMode.defaultMode, 0.25, false)
}while (aqData.mIsRunning)
CFRunLoopRunInMode(CFRunLoopMode.defaultMode, 1, false)
}
//calculate and setting buffer size
func DeriveBufferSize(ASBDesc:AudioStreamBasicDescription,maxPacketSize:UInt32,seconds:Float64,outBufferSize:UnsafeMutablePointer<UInt32>,outNumPacketsToRead:UnsafeMutablePointer<UInt32>) {
let maxBufferSize:UInt32 = 0x50000
let minBufferSIze:UInt32 = 0x4000
if ASBDesc.mFramesPerPacket != 0 {
let numPacketsForTime = ASBDesc.mSampleRate / Float64(ASBDesc.mFramesPerPacket) * seconds
outBufferSize.pointee = UInt32(numPacketsForTime) * maxPacketSize
}else{
outBufferSize.pointee = (maxBufferSize > maxPacketSize) ? maxBufferSize:maxPacketSize
}
if outBufferSize.pointee > maxBufferSize && outBufferSize.pointee > maxPacketSize {
outBufferSize.pointee = maxBufferSize
}else{
if outBufferSize.pointee < minBufferSIze{
outBufferSize.pointee = minBufferSIze
}
}
outNumPacketsToRead.pointee = outBufferSize.pointee/maxPacketSize
}
//dispose the audioqueue
func Dispose() {
AudioQueueDispose(aqData.mQueue!, true)
AudioFileClose(aqData.mAudioFile!)
free(aqData.mPacketDescs)
}
}
The above code was written following the Audio Queue Services Programming Guide.
I create an instance of this class and call its start() func.
It compiles fine, but no sound comes out.
I have checked the code many times but made no progress.
Can anyone familiar with Audio Queue Services help me?
Any help will be appreciated.
replace "AudioFileReadPacketData" with "AudioFileReadPackets" can fix this problem!
but I get a new problem like below sometime! sometimes it works well!
Stream Audio(20535,0x1085ac3c0) malloc: * error for object 0x6080001f6300: Invalid pointer dequeued from free list
* set a breakpoint in malloc_error_break to
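For what it's worth, a likely explanation for the original silence: the ioNumBytes argument of AudioFileReadPacketData is an in/out parameter that must be set to the destination buffer's capacity on input, but the callback passes it initialized to 0, so nothing is read. The deprecated AudioFileReadPackets treats that argument as output only, which is why swapping the calls appears to fix playback. A minimal sketch of the read with the byte count primed first (untested, using the same variables as the callback above):
var numBytesReadFromFile = inBuffer.pointee.mAudioDataBytesCapacity // in: buffer capacity, out: bytes actually read
var numPackets = pAqData.mNumPacketsToRead
let readStatus = AudioFileReadPacketData(pAqData.mAudioFile!,
                                         false,
                                         &numBytesReadFromFile,
                                         pAqData.mPacketDescs,
                                         pAqData.mCurrentPacket!,
                                         &numPackets,
                                         inBuffer.pointee.mAudioData)
if readStatus != noErr { print("AudioFileReadPacketData failed: \(readStatus)") }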
I bought a 3D 4K projector last year, and it uses DLP-Link technology, which requires a 120 fps left/right alternating video stream for stereoscopy.
I'm writing a player in Swift for Mac using Core Video, and a function requires me to pass an UnsafeMutableRawPointer? (void * in Obj-C) to it; I'm passing the view instance to it using the self keyword.
Here's my code:
//
// MovieView.swift
// Studio Media Player
//
// Created by DannyNiu on 2022-07-03.
//
import Foundation
import Cocoa
import AVFoundation
import CoreVideo
class MovieView : NSView
{
var asset: AVAsset?
var player: AVPlayer?
var item: AVPlayerItem?
var vout: AVPlayerItemVideoOutput?
var cvpb: CVPixelBuffer?
var cii: CIImage?
var cgi: CGImage?
var irect: CGRect?
var vlink: CVDisplayLink?
func setup_displaylink() -> Bool
{
var cvret: CVReturn
cvret = CVDisplayLinkCreateWithActiveCGDisplays(&vlink)
if( vlink == nil ) { return false }
var me: MovieView = self
cvret = CVDisplayLinkSetOutputCallback(
vlink!, vlink_callback, &me)
if( cvret == kCVReturnSuccess ) {
return true
} else { return false }
}
func assign_asset(_ asset: AVAsset)
{
self.asset = asset
item = .init(asset: asset)
player = .init(playerItem: item)
vout = .init()
cvpb = nil
cii = nil
}
func video_render(_ d: CVTimeStamp)
{
let t: CMTime = player!.currentTime()
let cgc: CGContext = (NSGraphicsContext.current?.cgContext)!
let cic: CIContext = (NSGraphicsContext.current?.ciContext)!
if( vout?.hasNewPixelBuffer(forItemTime: t) ?? false )
{
cvpb = vout?.copyPixelBuffer(
forItemTime: t, itemTimeForDisplay: nil)
cii = .init(cvPixelBuffer: cvpb!)
irect = cii!.extent
cgi = cic.createCGImage(cii!, from: irect!)
}
if( irect == nil ) { return }
let orect: CGRect = NSRectToCGRect(bounds)
var vrect: CGRect =
CGRect(origin: orect.origin,
size: CGSize(width: orect.width * 2,
height: irect!.height *
orect.width /
irect!.width))
cgc.setFillColor(gray:0, alpha:1)
cgc.fill(orect)
if( d.videoTime % 2 == 1 )
{
vrect = vrect.offsetBy(dx: -orect.width, dy: 0)
}
cgc.draw(cgi!, in: vrect)
}
}
func vlink_callback(
displayLink: CVDisplayLink,
inNow: UnsafePointer<CVTimeStamp>,
inOutputTime: UnsafePointer<CVTimeStamp>,
flagsIn: CVOptionFlags,
flagsOut: UnsafeMutablePointer<CVOptionFlags>,
arg_mvview: UnsafeMutableRawPointer?
) -> CVReturn
{
let mvview: MovieView = arg_mvview!.load(as: MovieView.self)
mvview.video_render(inNow.pointee)
return kCVReturnSuccess
}
When I debug the program, the mvview.video_render(inNow.pointee) line caused an EXC_BAD_ACCESS trap, with code = 1. I assume this is caused by me not correctly passing self to the display link call-back. So how can I fix this?
Use the Unmanaged class.
in setup_displaylink():
func setup_displaylink() -> Bool
{
var cvret: CVReturn
cvret = CVDisplayLinkCreateWithActiveCGDisplays(&vlink)
if( vlink == nil ) { return false }
let me: UnsafeMutableRawPointer =
Unmanaged.passUnretained(self).toOpaque()
cvret = CVDisplayLinkSetOutputCallback(
vlink!, vlink_callback, me)
if( cvret == kCVReturnSuccess ) {
return true
} else { return false }
}
and in vlink_callback
func vlink_callback(
displayLink: CVDisplayLink,
inNow: UnsafePointer<CVTimeStamp>,
inOutputTime: UnsafePointer<CVTimeStamp>,
flagsIn: CVOptionFlags,
flagsOut: UnsafeMutablePointer<CVOptionFlags>,
arg_mvview: UnsafeMutableRawPointer?
) -> CVReturn
{
let mvview: MovieView =
Unmanaged.fromOpaque(arg_mvview!).takeUnretainedValue()
mvview.video_render(inNow.pointee)
return kCVReturnSuccess
}
Notice I used the "Unretained" function, because there's no need to add to the reference count of the movie view, as self is being passed around internally. Use "Retained" functions when otherwise appropriate.
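As an aside, on recent macOS versions the raw-pointer round trip can be avoided entirely with the block-based CVDisplayLinkSetOutputHandler API, which lets a closure capture self directly. A sketch (untested, assuming the same vlink and video_render as above):
cvret = CVDisplayLinkSetOutputHandler(vlink!) { [weak self] _, inNow, _, _, _ in
    // The handler receives the same timestamps as the C callback would.
    self?.video_render(inNow.pointee)
    return kCVReturnSuccess
}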
I have a class method to read an mp3 file into an AVAudioPCMBuffer as follows:
private(set) var fullAudio: AVAudioPCMBuffer?
func initAudio(audioFileURL: URL) -> Bool {
var status = true
do {
let audioFile = try AVAudioFile(forReading: audioFileURL)
let audioFormat = audioFile.processingFormat
let audioFrameLength = UInt32(audioFile.length)
fullAudio = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: audioFrameLength)
if let fullAudio = fullAudio {
try audioFile.read(into: fullAudio)
// processing of full audio
}
} catch {
status = false
}
return status
}
However, I now need to be able to read the same mp3 info from memory (rather than a file) into the AVAudioPCMBuffer without using the file system, where the info is held in the Data type, for example using a function declaration of the form
func initAudio(audioFileData: Data) -> Bool {
// some code setting up fullAudio
}
How can this be done? I've looked to see whether there is a route from Data holding mp3 info to AVAudioPCMBuffer (e.g. via AVAudioBuffer or AVAudioCompressedBuffer), but haven't seen a way forward.
I went down the rabbit hole on this one. Here is what probably amounts to a Rube Goldberg-esque solution:
A lot of the pain comes from using C from Swift.
func data_AudioFile_ReadProc(_ inClientData: UnsafeMutableRawPointer, _ inPosition: Int64, _ requestCount: UInt32, _ buffer: UnsafeMutableRawPointer, _ actualCount: UnsafeMutablePointer<UInt32>) -> OSStatus {
let data = inClientData.assumingMemoryBound(to: Data.self).pointee
let bufferPointer = UnsafeMutableRawBufferPointer(start: buffer, count: Int(requestCount))
let available = max(0, data.count - Int(inPosition))
let copied = data.copyBytes(to: bufferPointer, from: Int(inPosition) ..< Int(inPosition) + min(Int(requestCount), available))
actualCount.pointee = UInt32(copied)
return noErr
}
func data_AudioFile_GetSizeProc(_ inClientData: UnsafeMutableRawPointer) -> Int64 {
let data = inClientData.assumingMemoryBound(to: Data.self).pointee
return Int64(data.count)
}
extension Data {
func convertedTo(_ format: AVAudioFormat) -> AVAudioPCMBuffer? {
var data = self
var af: AudioFileID? = nil
var status = AudioFileOpenWithCallbacks(&data, data_AudioFile_ReadProc, nil, data_AudioFile_GetSizeProc(_:), nil, 0, &af)
guard status == noErr, af != nil else {
return nil
}
defer {
AudioFileClose(af!)
}
var eaf: ExtAudioFileRef? = nil
status = ExtAudioFileWrapAudioFileID(af!, false, &eaf)
guard status == noErr, eaf != nil else {
return nil
}
defer {
ExtAudioFileDispose(eaf!)
}
var clientFormat = format.streamDescription.pointee
status = ExtAudioFileSetProperty(eaf!, kExtAudioFileProperty_ClientDataFormat, UInt32(MemoryLayout.size(ofValue: clientFormat)), &clientFormat)
guard status == noErr else {
return nil
}
if let channelLayout = format.channelLayout {
var clientChannelLayout = channelLayout.layout.pointee
status = ExtAudioFileSetProperty(eaf!, kExtAudioFileProperty_ClientChannelLayout, UInt32(MemoryLayout.size(ofValue: clientChannelLayout)), &clientChannelLayout)
guard status == noErr else {
return nil
}
}
var frameLength: Int64 = 0
var propertySize: UInt32 = UInt32(MemoryLayout.size(ofValue: frameLength))
status = ExtAudioFileGetProperty(eaf!, kExtAudioFileProperty_FileLengthFrames, &propertySize, &frameLength)
guard status == noErr else {
return nil
}
guard let pcmBuffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(frameLength)) else {
return nil
}
let bufferSizeFrames = 512
let bufferSizeBytes = Int(format.streamDescription.pointee.mBytesPerFrame) * bufferSizeFrames
let numBuffers = format.isInterleaved ? 1 : Int(format.channelCount)
let numInterleavedChannels = format.isInterleaved ? Int(format.channelCount) : 1
let audioBufferList = AudioBufferList.allocate(maximumBuffers: numBuffers)
for i in 0 ..< numBuffers {
audioBufferList[i] = AudioBuffer(mNumberChannels: UInt32(numInterleavedChannels), mDataByteSize: UInt32(bufferSizeBytes), mData: malloc(bufferSizeBytes))
}
defer {
for buffer in audioBufferList {
free(buffer.mData)
}
free(audioBufferList.unsafeMutablePointer)
}
while true {
var frameCount: UInt32 = UInt32(bufferSizeFrames)
status = ExtAudioFileRead(eaf!, &frameCount, audioBufferList.unsafeMutablePointer)
guard status == noErr else {
return nil
}
if frameCount == 0 {
break
}
let src = audioBufferList
let dst = UnsafeMutableAudioBufferListPointer(pcmBuffer.mutableAudioBufferList)
if src.count != dst.count {
return nil
}
for i in 0 ..< src.count {
let srcBuf = src[i]
let dstBuf = dst[i]
memcpy(dstBuf.mData?.advanced(by: Int(dstBuf.mDataByteSize)), srcBuf.mData, Int(srcBuf.mDataByteSize))
}
pcmBuffer.frameLength += frameCount
}
return pcmBuffer
}
}
A more robust solution would probably read the sample rate and channel count and give the option to preserve them.
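For example, the file's native format could be queried from the same ExtAudioFile handle before picking a client format; a sketch (untested, assumed to sit inside convertedTo(_:) right after the ExtAudioFileWrapAudioFileID step):
var fileFormat = AudioStreamBasicDescription()
var fileFormatSize = UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
status = ExtAudioFileGetProperty(eaf!, kExtAudioFileProperty_FileDataFormat, &fileFormatSize, &fileFormat)
guard status == noErr else { return nil }
// Build a float32, deinterleaved PCM format at the file's own sample rate and channel count.
guard let nativeFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32,
                                       sampleRate: fileFormat.mSampleRate,
                                       channels: fileFormat.mChannelsPerFrame,
                                       interleaved: false) else { return nil }
The conversion could then target nativeFormat (or expose it to the caller) instead of requiring a hard-coded 44.1 kHz mono format.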
Tested using:
let url = URL(fileURLWithPath: "/tmp/test.mp3")
let data = try! Data(contentsOf: url)
let format = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)!
if let d = data.convertedTo(format) {
let avf = try! AVAudioFile(forWriting: URL(fileURLWithPath: "/tmp/foo.wav"), settings: format.settings, commonFormat: format.commonFormat, interleaved: format.isInterleaved)
try! avf.write(from: d)
}
I have to use AudioToolbox instead of AVAudioSession to provide a stream to SFSpeechRecognizer. I know that I should use AudioQueue, so I record the audio and export it to a CMSampleBuffer for the recognizer to read. While debugging I can see that the buffer is appended to the SFSpeechAudioBufferRecognitionRequest, but the code in the task closure never executes: there is neither a result nor an error.
What's wrong with the code?
let NUM_BUFFERS = 1
struct RecordState {
var dataFormat = AudioStreamBasicDescription()
var queue: AudioQueueRef?
var buffers: [AudioQueueBufferRef] = []
var audioFile: AudioFileID?
var currentPacket: Int64 = 0
var recording = false
}
func сallback(_ inUserData: UnsafeMutableRawPointer?,
_ inAQ: AudioQueueRef,
_ inBuffer: AudioQueueBufferRef,
_ inStartTime: UnsafePointer<AudioTimeStamp>,
_ inNumberPacketDescriptions: UInt32,
_ inPacketDescs: UnsafePointer<AudioStreamPacketDescription>?) {
let recordState = inUserData?.assumingMemoryBound(to: RecordState.self)
if let queue = recordState?.pointee.queue {
AudioQueueEnqueueBuffer(queue, inBuffer, 0, nil)
let rec = AudioRecorder.sharedInstance
rec.transformBuffer(pBuffer: inBuffer, pLength: inBuffer.pointee.mAudioDataByteSize)
}
}
class AudioRecorder: NSObject, ObservableObject, SFSpeechRecognizerDelegate {
let format = AudioStreamBasicDescription(mSampleRate: Float64(16000.0), mFormatID: kAudioFormatLinearPCM, mFormatFlags: kAudioFormatFlagsNativeFloatPacked, mBytesPerPacket: UInt32(MemoryLayout<Float32>.size), mFramesPerPacket: 1, mBytesPerFrame: UInt32(MemoryLayout<Float32>.size), mChannelsPerFrame: 1, mBitsPerChannel: UInt32(MemoryLayout<Float32>.size * 8), mReserved: 0)
var recordState = RecordState()
var startTime = CFAbsoluteTimeGetCurrent()
static var sharedInstance = AudioRecorder()
private var speechRecognizer = SFSpeechRecognizer()!
private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
private var recognitionTask: SFSpeechRecognitionTask?
private var engineEnabled = false
private var lastText = [SFTranscriptionSegment]()
override init() {
super.init()
OperationQueue.main.addOperation {
SFSpeechRecognizer.requestAuthorization { authStatus in
switch authStatus {
case .authorized:
self.engineEnabled = true
default:
self.engineEnabled = false
}
}
}
self.speechRecognizer.delegate = self
}
func startRecording() {
recordState.dataFormat = format
var queue: AudioQueueRef?
if AudioQueueNewInput(&recordState.dataFormat, сallback, &recordState, CFRunLoopGetCurrent(), CFRunLoopMode.commonModes.rawValue, 0, &queue) == noErr {
recordState.queue = queue
} else {
return
}
for _ in 0..<NUM_BUFFERS {
var buffer: AudioQueueBufferRef?
if AudioQueueAllocateBuffer(queue!, 1024, &buffer) == noErr {
recordState.buffers.append(buffer!)
}
AudioQueueEnqueueBuffer(queue!, buffer!, 0, nil)
}
recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let recognitionRequest = recognitionRequest else { fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") }
recognitionRequest.shouldReportPartialResults = true
// Keep speech recognition data on device
if #available(iOS 13, *) {
recognitionRequest.requiresOnDeviceRecognition = true
}
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
var isFinal = false
if let result = result {
print(result.bestTranscription.formattedString)
isFinal = result.isFinal
}
if error != nil || isFinal {
// Stop recognizing speech if there is a problem.
self.recognitionRequest = nil
self.recognitionTask = nil
}
}
recordState.recording = true
if AudioQueueStart(recordState.queue!, nil) != noErr {
fatalError("Something is wrong")
}
self.startTime = CFAbsoluteTimeGetCurrent()
}
func stopRecording() {
recordState.recording = false
AudioQueueStop(recordState.queue!, true)
for i in 0..<NUM_BUFFERS {
if let buffers = recordState.buffers[i] as? AudioQueueBufferRef {
AudioQueueFreeBuffer(recordState.queue!, buffers)
}
}
AudioQueueDispose(recordState.queue!, true)
if let file = recordState.audioFile {
AudioFileClose(file)
}
}
func transformBuffer(pBuffer: AudioQueueBufferRef, pLength: UInt32) {
var blockBuffer: CMBlockBuffer?
CMBlockBufferCreateWithMemoryBlock(allocator: kCFAllocatorDefault, memoryBlock: pBuffer, blockLength: Int(pLength), blockAllocator: kCFAllocatorNull, customBlockSource: nil, offsetToData: 0, dataLength: Int(pLength), flags: kCMBlockBufferAssureMemoryNowFlag, blockBufferOut: &blockBuffer)
let timeFormat = format.mSampleRate
let currentTime = CFAbsoluteTimeGetCurrent()
let elapsedTime: CFTimeInterval = currentTime - self.startTime
let timeStamp = CMTimeMake(value: Int64(elapsedTime * timeFormat), timescale: Int32(timeFormat))
let nSamples = Int(pLength / format.mBytesPerFrame)
do {
let formatDescription = try CMAudioFormatDescription(audioStreamBasicDescription: format)
var sampleBuffer: CMSampleBuffer?
CMAudioSampleBufferCreateWithPacketDescriptions(allocator: kCFAllocatorDefault, dataBuffer: blockBuffer, dataReady: true, makeDataReadyCallback: nil, refcon: nil, formatDescription: formatDescription, sampleCount: nSamples, presentationTimeStamp: timeStamp, packetDescriptions: nil, sampleBufferOut: &sampleBuffer)
if let sBuffer = sampleBuffer {
self.recognitionRequest?.appendAudioSampleBuffer(sBuffer)
}
} catch {
fatalError(error.localizedDescription)
}
}
}
Update: I modified the code above to make it more descriptive.
Finally, I've found the answer. Here's the code for the conversion of AudioQueueBufferRef into AVAudioPCMBuffer:
func queueBufferToAudioBuffer(_ buffer: AudioQueueBufferRef) -> AVAudioPCMBuffer? {
guard let audioFormat = AVAudioFormat(
commonFormat: .pcmFormatFloat32,
sampleRate: format.mSampleRate,
channels: format.mChannelsPerFrame,
interleaved: true)
else { return nil }
let frameLength = buffer.pointee.mAudioDataBytesCapacity / audioFormat.streamDescription.pointee.mBytesPerFrame
guard let audioBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: frameLength) else { return nil }
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.floatChannelData![0]
let src = buffer.pointee.mAudioData.bindMemory(to: Float.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
return audioBuffer
}
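The converted buffer can then be handed to the recognizer through the AVAudioPCMBuffer overload rather than appendAudioSampleBuffer. A sketch of how transformBuffer might be reduced to that (same class as above, so the private recognitionRequest is reachable):
func transformBuffer(pBuffer: AudioQueueBufferRef, pLength: UInt32) {
    // Convert the queue buffer and feed it straight to the speech recognizer.
    if let pcmBuffer = queueBufferToAudioBuffer(pBuffer) {
        recognitionRequest?.append(pcmBuffer)
    }
}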
I fixed this by setting up the AVAudioSession before AudioQueueStart.
do{
try AVAudioSession.sharedInstance().setCategory(.record, mode:.default)
try AVAudioSession.sharedInstance().setActive(true)
} catch{
print(error)
}
I am using this piece of code to create a timelapse movie from a collection of images:
https://github.com/acj/TimeLapseBuilder-Swift/blob/master/TimeLapseBuilder/TimeLapseBuilder.swift
So now I want to add a specific duration between the images. For example, every image should be shown for 3 seconds, but the frame rate of the video should still be 30 fps.
Can somebody help me with this?
videoWriterInput.requestMediaDataWhenReady(on: media_queue) {
let fps: Int32 = 30
let frameDuration = CMTimeMake(1, fps)
let currentProgress = Progress(totalUnitCount: Int64(self.photoURLs.count))
var frameCount: Int64 = 0
var remainingPhotoURLs = [String](self.photoURLs)
while videoWriterInput.isReadyForMoreMediaData && !remainingPhotoURLs.isEmpty {
let nextPhotoURL = remainingPhotoURLs.remove(at: 0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
if !self.appendPixelBufferForImageAtURL(nextPhotoURL, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(
domain: kErrorDomain,
code: kFailedToAppendPixelBufferError,
userInfo: ["description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer"]
)
break
}
frameCount += 1
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
videoWriterInput.markAsFinished()
videoWriter.finishWriting {
if let error = error {
failure(error)
} else {
success(videoOutputURL)
}
self.videoWriter = nil
}
}
} else {
error = NSError(
domain: kErrorDomain,
code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
Many thanks for your help!
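One approach that might work with the linked TimeLapseBuilder (a sketch only, not tested): keep the 30 fps timescale, but advance the presentation time by three seconds' worth of that timescale for every appended image, so each pixel buffer occupies 3 seconds of the timeline.
// Hypothetical adaptation of the loop body above; frameCount still counts images.
let fps: Int32 = 30
let secondsPerImage: Int64 = 3
var frameCount: Int64 = 0
// For each image: place it at frameCount * 3 seconds on a 30 fps timescale.
let presentationTime = CMTimeMake(frameCount * secondsPerImage * Int64(fps), fps)
// append the pixel buffer for the current image at presentationTime, then:
frameCount += 1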
I would like to detect when a specific USB device is plugged in or removed in my application. For now, I can get the device name by following the tutorial Working With USB Device Interfaces. But how can I write the callback function (deviceAdded) of type IOServiceMatchingCallback in Swift?
I tried the following, but I got an error: Cannot convert value of type '(UnsafePointer<Void>, iterator: io_iterator_t) -> ()' to expected argument type 'IOServiceMatchingCallback!'
func detectUSBEvent() {
var portIterator: io_iterator_t = 0
var kr: kern_return_t = KERN_FAILURE
let matchingDict = IOServiceMatching(kIOUSBDeviceClassName)
let vendorIDString = kUSBVendorID as CFStringRef!
let productIDString = kUSBProductID as CFStringRef!
CFDictionarySetValue(matchingDict, unsafeAddressOf(vendorIDString), unsafeAddressOf(VendorID))
CFDictionarySetValue(matchingDict, unsafeAddressOf(productIDString), unsafeAddressOf(ProductID))
// To set up asynchronous notifications, create a notification port and add its run loop event source to the program’s run loop
let gNotifyPort: IONotificationPortRef = IONotificationPortCreate(kIOMasterPortDefault)
let runLoopSource: Unmanaged<CFRunLoopSource>! = IONotificationPortGetRunLoopSource(gNotifyPort)
let gRunLoop: CFRunLoop! = CFRunLoopGetCurrent()
CFRunLoopAddSource(gRunLoop, runLoopSource.takeUnretainedValue(), kCFRunLoopDefaultMode)
// Notification of first match:
kr = IOServiceAddMatchingNotification(gNotifyPort, kIOFirstMatchNotification, matchingDict, deviceAdded, nil, &portIterator)
deviceAdded(nil, iterator: portIterator)
}
func deviceAdded(refCon: UnsafePointer<Void>, iterator: io_iterator_t) {
if let usbDevice: io_service_t = IOIteratorNext(iterator)
{
let name = String()
let cs = (name as NSString).UTF8String
let deviceName: UnsafeMutablePointer<Int8> = UnsafeMutablePointer<Int8>(cs)
let kr: kern_return_t = IORegistryEntryGetName(usbDevice, deviceName)
if kr == KERN_SUCCESS {
let deviceNameAsCFString = CFStringCreateWithCString(kCFAllocatorDefault, deviceName,
kCFStringEncodingASCII)
print(deviceNameAsCFString)
// if deviceNameAsCFString == XXX
// Do Something
}
}
}
Here's a Swift 3 version. It uses closures instead of global functions (a closure without a context can be bridged to a C function pointer), GCD instead of run loops (a much nicer API), callbacks dispatched onto a caller-supplied queue to report events, and real object instances instead of static objects or singletons:
import Darwin
import IOKit
import IOKit.usb
import Foundation
class IOUSBDetector {
enum Event {
case Matched
case Terminated
}
let vendorID: Int
let productID: Int
var callbackQueue: DispatchQueue?
var callback: (
( _ detector: IOUSBDetector, _ event: Event,
_ service: io_service_t
) -> Void
)?
private
let internalQueue: DispatchQueue
private
let notifyPort: IONotificationPortRef
private
var matchedIterator: io_iterator_t = 0
private
var terminatedIterator: io_iterator_t = 0
private
func dispatchEvent (
event: Event, iterator: io_iterator_t
) {
repeat {
let nextService = IOIteratorNext(iterator)
guard nextService != 0 else { break }
if let cb = self.callback, let q = self.callbackQueue {
q.async {
cb(self, event, nextService)
IOObjectRelease(nextService)
}
} else {
IOObjectRelease(nextService)
}
} while (true)
}
init? ( vendorID: Int, productID: Int ) {
self.vendorID = vendorID
self.productID = productID
self.internalQueue = DispatchQueue(label: "IODetector")
guard let notifyPort = IONotificationPortCreate(kIOMasterPortDefault) else {
return nil
}
self.notifyPort = notifyPort
IONotificationPortSetDispatchQueue(notifyPort, self.internalQueue)
}
deinit {
self.stopDetection()
}
func startDetection ( ) -> Bool {
guard matchedIterator == 0 else { return true }
let matchingDict = IOServiceMatching(kIOUSBDeviceClassName)
as NSMutableDictionary
matchingDict[kUSBVendorID] = NSNumber(value: vendorID)
matchingDict[kUSBProductID] = NSNumber(value: productID)
let matchCallback: IOServiceMatchingCallback = {
(userData, iterator) in
let detector = Unmanaged<IOUSBDetector>
.fromOpaque(userData!).takeUnretainedValue()
detector.dispatchEvent(
event: .Matched, iterator: iterator
)
};
let termCallback: IOServiceMatchingCallback = {
(userData, iterator) in
let detector = Unmanaged<IOUSBDetector>
.fromOpaque(userData!).takeUnretainedValue()
detector.dispatchEvent(
event: .Terminated, iterator: iterator
)
};
let selfPtr = Unmanaged.passUnretained(self).toOpaque()
let addMatchError = IOServiceAddMatchingNotification(
self.notifyPort, kIOFirstMatchNotification,
matchingDict, matchCallback, selfPtr, &self.matchedIterator
)
let addTermError = IOServiceAddMatchingNotification(
self.notifyPort, kIOTerminatedNotification,
matchingDict, termCallback, selfPtr, &self.terminatedIterator
)
guard addMatchError == 0 && addTermError == 0 else {
if self.matchedIterator != 0 {
IOObjectRelease(self.matchedIterator)
self.matchedIterator = 0
}
if self.terminatedIterator != 0 {
IOObjectRelease(self.terminatedIterator)
self.terminatedIterator = 0
}
return false
}
// This is required even if nothing was found to "arm" the callback
self.dispatchEvent(event: .Matched, iterator: self.matchedIterator)
self.dispatchEvent(event: .Terminated, iterator: self.terminatedIterator)
return true
}
func stopDetection ( ) {
guard self.matchedIterator != 0 else { return }
IOObjectRelease(self.matchedIterator)
IOObjectRelease(self.terminatedIterator)
self.matchedIterator = 0
self.terminatedIterator = 0
}
}
And here is some simple test code to test that class (set product and vendor ID as appropriate for your USB device):
let test = IOUSBDetector(vendorID: 0x4e8, productID: 0x1a23)
test?.callbackQueue = DispatchQueue.global()
test?.callback = {
(detector, event, service) in
print("Event \(event)")
};
_ = test?.startDetection()
while true { sleep(1) }
It works after I moved the callback function outside the class. However, I don't know why.
class IODetection {
class func monitorUSBEvent(VendorID: Int, ProductID: Int) {
var portIterator: io_iterator_t = 0
var kr: kern_return_t = KERN_FAILURE
let matchingDict = IOServiceMatching(kIOUSBDeviceClassName)
// Add the VENDOR and PRODUCT IDs to the matching dictionary.
let vendorIDString = kUSBVendorID as CFStringRef!
let productIDString = kUSBProductID as CFStringRef!
CFDictionarySetValue(matchingDict, unsafeAddressOf(vendorIDString), unsafeAddressOf(VendorID))
CFDictionarySetValue(matchingDict, unsafeAddressOf(productIDString), unsafeAddressOf(ProductID))
// To set up asynchronous notifications, create a notification port and add its run loop event source to the program’s run loop
let gNotifyPort: IONotificationPortRef = IONotificationPortCreate(kIOMasterPortDefault)
let runLoopSource: Unmanaged<CFRunLoopSource>! = IONotificationPortGetRunLoopSource(gNotifyPort)
let gRunLoop: CFRunLoop! = CFRunLoopGetCurrent()
CFRunLoopAddSource(gRunLoop, runLoopSource.takeRetainedValue(), kCFRunLoopDefaultMode)
// MARK: - USB in Notification
let observer = UnsafeMutablePointer<Void>(unsafeAddressOf(self))
kr = IOServiceAddMatchingNotification(gNotifyPort,
kIOMatchedNotification,
matchingDict,
deviceAdded,
observer,
&portIterator)
deviceAdded(nil, iterator: portIterator)
// MARK: - USB remove Notification
kr = IOServiceAddMatchingNotification(gNotifyPort,
kIOTerminatedNotification,
matchingDict,
deviceRemoved,
observer,
&portIterator)
deviceRemoved(nil, iterator: portIterator)
}
}
func deviceAdded(refCon: UnsafeMutablePointer<Void>, iterator: io_iterator_t) -> Void {
var kr: kern_return_t = KERN_FAILURE
while case let usbDevice = IOIteratorNext(iterator) where usbDevice != 0 {
let deviceNameAsCFString = UnsafeMutablePointer<io_name_t>.alloc(1)
defer {deviceNameAsCFString.dealloc(1)}
kr = IORegistryEntryGetName(usbDevice, UnsafeMutablePointer(deviceNameAsCFString))
if kr != KERN_SUCCESS {
deviceNameAsCFString.memory.0 = 0
}
let deviceName = String.fromCString(UnsafePointer(deviceNameAsCFString))
print("Device Added: \(deviceName!)")
// Do something if I get the specific device
if deviceName == "YOUR DEVICE" {
/// Your Action HERE
}
IOObjectRelease(usbDevice)
}
}
My problem was that I wasn't using the iterator in my callback function, so that function wasn't even getting called! Seems like strange behaviour to me, but that was my problem.
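For reference, that matches the comment in the class above: the iterator returned by IOServiceAddMatchingNotification must be drained, even when it is empty, before the notification is armed and the callback starts firing for later events. A minimal sketch of a callback that does so (hypothetical names):
let deviceAddedCallback: IOServiceMatchingCallback = { _, iterator in
    // Drain the iterator completely; this is what arms the notification
    // for subsequent device events.
    var service = IOIteratorNext(iterator)
    while service != 0 {
        // inspect `service` here if needed
        IOObjectRelease(service)
        service = IOIteratorNext(iterator)
    }
}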