AVAssetWriterInput - add image with a specific duration - swift

I am using this piece of code to create a timelapse movie from a collection of images:
https://github.com/acj/TimeLapseBuilder-Swift/blob/master/TimeLapseBuilder/TimeLapseBuilder.swift
Now I want to give each image a specific duration. For example, every image should be shown for 3 seconds, but the video should still be 30 fps.
Can somebody help me with this?
videoWriterInput.requestMediaDataWhenReady(on: media_queue) {
    let fps: Int32 = 30
    let frameDuration = CMTimeMake(1, fps)
    let currentProgress = Progress(totalUnitCount: Int64(self.photoURLs.count))
    var frameCount: Int64 = 0
    var remainingPhotoURLs = [String](self.photoURLs)
    while videoWriterInput.isReadyForMoreMediaData && !remainingPhotoURLs.isEmpty {
        let nextPhotoURL = remainingPhotoURLs.remove(at: 0)
        let lastFrameTime = CMTimeMake(frameCount, fps)
        let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
        if !self.appendPixelBufferForImageAtURL(nextPhotoURL, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
            error = NSError(
                domain: kErrorDomain,
                code: kFailedToAppendPixelBufferError,
                userInfo: ["description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer"]
            )
            break
        }
        frameCount += 1
        currentProgress.completedUnitCount = frameCount
        progress(currentProgress)
    }
    videoWriterInput.markAsFinished()
    videoWriter.finishWriting {
        if let error = error {
            failure(error)
        } else {
            success(videoOutputURL)
        }
        self.videoWriter = nil
    }
}
} else {
    error = NSError(
        domain: kErrorDomain,
        code: kFailedToStartAssetWriterError,
        userInfo: ["description": "AVAssetWriter failed to start writing"]
    )
}
Many thanks for your help!
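One way to get there (a minimal sketch, not a tested answer; it reuses the variable names from the loop above): express the 3-second slot in the 30 fps timescale and stamp successive images that far apart. The resulting track is variable frame rate, but its timebase stays at 30 fps.

let fps: Int32 = 30
let secondsPerImage: Int64 = 3
// 90/30 = 3 seconds between successive images
let frameDuration = CMTimeMake(secondsPerImage * Int64(fps), fps)

while videoWriterInput.isReadyForMoreMediaData && !remainingPhotoURLs.isEmpty {
    let nextPhotoURL = remainingPhotoURLs.remove(at: 0)
    // image k is presented at t = k * 3 seconds
    let presentationTime = CMTimeMake(frameCount * secondsPerImage * Int64(fps), fps)
    if !self.appendPixelBufferForImageAtURL(nextPhotoURL, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
        break // report the NSError as in the original loop
    }
    frameCount += 1
    currentProgress.completedUnitCount = frameCount
    progress(currentProgress)
}

If you need a true constant 30 fps stream (each image repeated for 90 physical frames) rather than a variable-frame-rate track, append the same pixel buffer 90 times per image instead, advancing the presentation time by CMTimeMake(1, fps) on every append.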

Related

Stream Microphone Audio from one device to another using Multipeer Connectivity and EZAudio

[TL;DR: Receiving an ASSERTION FAILURE in CABufferList.h (see the error at the bottom) when trying to save streamed audio data]
I am having trouble saving microphone audio that is streamed between devices using Multipeer Connectivity. So far I have two devices connected to each other using Multipeer Connectivity and have them sending messages and streams to each other.
Finally, I have the StreamDelegate method:
func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
    // create a buffer for capturing the input stream data
    let bufferSize = 2048
    let buffer = UnsafeMutablePointer<UInt8>.allocate(capacity: bufferSize)
    defer {
        buffer.deallocate()
    }
    var audioBuffer: AudioBuffer!
    var audioBufferList: AudioBufferList!
    switch eventCode {
    case .hasBytesAvailable:
        // the input stream has bytes available;
        // read() returns the actual number of bytes placed in the buffer
        let read = self.inputStream.read(buffer, maxLength: bufferSize)
        if read < 0 {
            // stream error occurred
            print(self.inputStream.streamError!)
        } else if read == 0 {
            // EOF
            break
        }
        guard let mData = UnsafeMutableRawPointer(buffer) else { return }
        audioBuffer = AudioBuffer(mNumberChannels: 1, mDataByteSize: UInt32(read), mData: mData)
        audioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: audioBuffer)
        let audioBufferListPointer = UnsafeMutablePointer<AudioBufferList>.allocate(capacity: read)
        audioBufferListPointer.pointee = audioBufferList
        DispatchQueue.main.async {
            if self.ezRecorder == nil {
                self.recordAudio()
            }
            self.ezRecorder?.appendData(from: audioBufferListPointer, withBufferSize: UInt32(read))
        }
        print("hasBytesAvailable \(audioBuffer!)")
    case .endEncountered:
        print("endEncountered")
        if self.inputStream != nil {
            self.inputStream.delegate = nil
            self.inputStream.remove(from: .current, forMode: .default)
            self.inputStream.close()
            self.inputStream = nil
        }
    case .errorOccurred:
        print("errorOccurred")
    case .hasSpaceAvailable:
        print("hasSpaceAvailable")
    case .openCompleted:
        print("openCompleted")
    default:
        break
    }
}
I am receiving the stream of data; however, when I try to save it as an audio file using EZRecorder, I get the following error message:
[default] CABufferList.h:184 ASSERTION FAILURE [(nBytes <= buf->mDataByteSize) != 0 is false]:
I suspect the error could be arising when I create the AudioStreamBasicDescription for EZRecorder.
I understand there may be other errors here and I appreciate any suggestions to solve the bug and improve the code. Thanks
EZAudio comes with TPCircularBuffer - use that.
Because writing the buffer to file is an async operation, this becomes a great use case for a circular buffer where we have one producer and one consumer.
Use the EZAudioUtilities where possible.
Update: EZRecorder's write expects bufferSize to be the number of frames to write, not the number of bytes.
So something like this should work:
class StreamDelegateInstance: NSObject {
    private static let MaxReadSize = 2048
    private static let BufferSize = MaxReadSize * 4

    private var availableReadBytesPtr = UnsafeMutablePointer<Int32>.allocate(capacity: 1)
    private var availableWriteBytesPtr = UnsafeMutablePointer<Int32>.allocate(capacity: 1)
    private var ezRecorder: EZRecorder?
    private var buffer = UnsafeMutablePointer<TPCircularBuffer>.allocate(capacity: 1)
    private var inputStream: InputStream?

    init(inputStream: InputStream? = nil) {
        self.inputStream = inputStream
        super.init()
        EZAudioUtilities.circularBuffer(buffer, withSize: Int32(StreamDelegateInstance.BufferSize))
        ensureWriteStream()
    }

    deinit {
        EZAudioUtilities.freeCircularBuffer(buffer)
        buffer.deallocate()
        availableReadBytesPtr.deallocate()
        availableWriteBytesPtr.deallocate()
        self.ezRecorder?.closeAudioFile()
        self.ezRecorder = nil
    }

    private func ensureWriteStream() {
        guard self.ezRecorder == nil else { return }
        // stores audio in the temporary folder
        let audioOutputPath = NSTemporaryDirectory() + "audioOutput2.aiff"
        let audioOutputURL = URL(fileURLWithPath: audioOutputPath)
        print(audioOutputURL)
        // let audioStreamBasicDescription = AudioStreamBasicDescription(mSampleRate: 44100.0, mFormatID: kAudioFormatLinearPCM, mFormatFlags: kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked, mBytesPerPacket: 4, mFramesPerPacket: 1, mBytesPerFrame: 4, mChannelsPerFrame: 1, mBitsPerChannel: 32, mReserved: 1081729024)
        // EZAudioUtilities.audioBufferList(withNumberOfFrames: <#T##UInt32#>,
        //                                  numberOfChannels: 1,
        //                                  interleaved: true)
        // if you don't need a custom format, consider using EZAudioUtilities.m4AFormat
        let format = EZAudioUtilities.aiffFormat(withNumberOfChannels: 1,
                                                 sampleRate: 44800)
        self.ezRecorder = EZRecorder.init(url: audioOutputURL,
                                          clientFormat: format,
                                          fileType: .AIFF)
    }

    private func writeStream() {
        let ptr = TPCircularBufferTail(buffer, availableWriteBytesPtr)
        // ensure we have non-zero bytes to write - which should always be true,
        // but you may want to refactor things
        guard availableWriteBytesPtr.pointee > 0 else { return }
        let framesToWrite = availableWriteBytesPtr.pointee / 4 // sizeof(float)
        let audioBuffer = AudioBuffer(mNumberChannels: 1,
                                      mDataByteSize: UInt32(availableWriteBytesPtr.pointee),
                                      mData: ptr)
        // var, because appendData takes the list by mutable pointer (&)
        var audioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: audioBuffer)
        self.ezRecorder?.appendData(from: &audioBufferList,
                                    withBufferSize: UInt32(framesToWrite))
        TPCircularBufferConsume(buffer, framesToWrite * 4)
    }
}
extension StreamDelegateInstance: StreamDelegate {
    func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
        switch eventCode {
        case .hasBytesAvailable:
            // the input stream has bytes available;
            // read() returns the actual number of bytes placed in the buffer
            guard let ptr = TPCircularBufferHead(buffer, availableReadBytesPtr) else {
                print("couldn't get buffer ptr")
                break
            }
            let bytesToRead = min(Int(availableReadBytesPtr.pointee), StreamDelegateInstance.MaxReadSize)
            let mutablePtr = ptr.bindMemory(to: UInt8.self, capacity: bytesToRead)
            let bytesRead = self.inputStream?.read(mutablePtr, maxLength: bytesToRead) ?? 0
            if bytesRead < 0 {
                // stream error occurred
                print(self.inputStream?.streamError?.localizedDescription ?? "No bytes read")
                break
            } else if bytesRead == 0 {
                // EOF
                break
            }
            TPCircularBufferProduce(buffer, Int32(bytesRead))
            DispatchQueue.main.async { [weak self] in
                self?.writeStream()
            }
        case .endEncountered:
            print("endEncountered")
            if self.inputStream != nil {
                self.inputStream?.delegate = nil
                self.inputStream?.remove(from: .current, forMode: .default)
                self.inputStream?.close()
                self.inputStream = nil
            }
        case .errorOccurred:
            print("errorOccurred")
        case .hasSpaceAvailable:
            print("hasSpaceAvailable")
        case .openCompleted:
            print("openCompleted")
        default:
            break
        }
    }
}
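For completeness, the delegate might be wired up like this when the stream arrives over Multipeer Connectivity (my sketch, not part of the answer; streamHandler is a hypothetical property that keeps the delegate alive, since a Stream does not retain its delegate):

// MCSessionDelegate callback that delivers the incoming stream
func session(_ session: MCSession, didReceive stream: InputStream,
             withName streamName: String, fromPeer peerID: MCPeerID) {
    let handler = StreamDelegateInstance(inputStream: stream)
    stream.delegate = handler
    stream.schedule(in: .main, forMode: .default)
    stream.open()
    self.streamHandler = handler // hypothetical property; retains the delegate
}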

How can mp3 data in memory be loaded into an AVAudioPCMBuffer in Swift?

I have a class method to read an mp3 file into an AVAudioPCMBuffer as follows:
private(set) var fullAudio: AVAudioPCMBuffer?

func initAudio(audioFileURL: URL) -> Bool {
    var status = true
    do {
        let audioFile = try AVAudioFile(forReading: audioFileURL)
        let audioFormat = audioFile.processingFormat
        let audioFrameLength = UInt32(audioFile.length)
        fullAudio = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: audioFrameLength)
        if let fullAudio = fullAudio {
            try audioFile.read(into: fullAudio)
            // processing of full audio
        }
    } catch {
        status = false
    }
    return status
}
However, I now need to be able to read the same mp3 data from memory (rather than a file) into an AVAudioPCMBuffer without touching the file system, where the data is held in a Data value, for example using a function declaration of the form:
func initAudio(audioFileData: Data) -> Bool {
    // some code setting up fullAudio
}
How can this be done? I've looked to see whether there is a route from Data holding mp3 info to AVAudioPCMBuffer (e.g. via AVAudioBuffer or AVAudioCompressedBuffer), but haven't seen a way forward.
I went down the rabbit hole on this one. Here is what probably amounts to a Rube Goldberg-esque solution:
A lot of the pain comes from using C from Swift.
func data_AudioFile_ReadProc(_ inClientData: UnsafeMutableRawPointer, _ inPosition: Int64, _ requestCount: UInt32, _ buffer: UnsafeMutableRawPointer, _ actualCount: UnsafeMutablePointer<UInt32>) -> OSStatus {
    let data = inClientData.assumingMemoryBound(to: Data.self).pointee
    let bufferPointer = UnsafeMutableRawBufferPointer(start: buffer, count: Int(requestCount))
    // clamp the requested range so a read near EOF can't run past the end of the data
    let range = (Int(inPosition) ..< Int(inPosition) + Int(requestCount)).clamped(to: 0 ..< data.count)
    let copied = data.copyBytes(to: bufferPointer, from: range)
    actualCount.pointee = UInt32(copied)
    return noErr
}

func data_AudioFile_GetSizeProc(_ inClientData: UnsafeMutableRawPointer) -> Int64 {
    let data = inClientData.assumingMemoryBound(to: Data.self).pointee
    return Int64(data.count)
}
extension Data {
    func convertedTo(_ format: AVAudioFormat) -> AVAudioPCMBuffer? {
        var data = self
        var af: AudioFileID? = nil
        var status = AudioFileOpenWithCallbacks(&data, data_AudioFile_ReadProc, nil, data_AudioFile_GetSizeProc(_:), nil, 0, &af)
        guard status == noErr, af != nil else {
            return nil
        }
        defer {
            AudioFileClose(af!)
        }
        var eaf: ExtAudioFileRef? = nil
        status = ExtAudioFileWrapAudioFileID(af!, false, &eaf)
        guard status == noErr, eaf != nil else {
            return nil
        }
        defer {
            ExtAudioFileDispose(eaf!)
        }
        var clientFormat = format.streamDescription.pointee
        status = ExtAudioFileSetProperty(eaf!, kExtAudioFileProperty_ClientDataFormat, UInt32(MemoryLayout.size(ofValue: clientFormat)), &clientFormat)
        guard status == noErr else {
            return nil
        }
        if let channelLayout = format.channelLayout {
            var clientChannelLayout = channelLayout.layout.pointee
            status = ExtAudioFileSetProperty(eaf!, kExtAudioFileProperty_ClientChannelLayout, UInt32(MemoryLayout.size(ofValue: clientChannelLayout)), &clientChannelLayout)
            guard status == noErr else {
                return nil
            }
        }
        var frameLength: Int64 = 0
        var propertySize: UInt32 = UInt32(MemoryLayout.size(ofValue: frameLength))
        status = ExtAudioFileGetProperty(eaf!, kExtAudioFileProperty_FileLengthFrames, &propertySize, &frameLength)
        guard status == noErr else {
            return nil
        }
        guard let pcmBuffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(frameLength)) else {
            return nil
        }
        let bufferSizeFrames = 512
        let bufferSizeBytes = Int(format.streamDescription.pointee.mBytesPerFrame) * bufferSizeFrames
        let numBuffers = format.isInterleaved ? 1 : Int(format.channelCount)
        let numInterleavedChannels = format.isInterleaved ? Int(format.channelCount) : 1
        let audioBufferList = AudioBufferList.allocate(maximumBuffers: numBuffers)
        for i in 0 ..< numBuffers {
            audioBufferList[i] = AudioBuffer(mNumberChannels: UInt32(numInterleavedChannels), mDataByteSize: UInt32(bufferSizeBytes), mData: malloc(bufferSizeBytes))
        }
        defer {
            for buffer in audioBufferList {
                free(buffer.mData)
            }
            free(audioBufferList.unsafeMutablePointer)
        }
        while true {
            var frameCount: UInt32 = UInt32(bufferSizeFrames)
            status = ExtAudioFileRead(eaf!, &frameCount, audioBufferList.unsafeMutablePointer)
            guard status == noErr else {
                return nil
            }
            if frameCount == 0 {
                break
            }
            let src = audioBufferList
            let dst = UnsafeMutableAudioBufferListPointer(pcmBuffer.mutableAudioBufferList)
            if src.count != dst.count {
                return nil
            }
            for i in 0 ..< src.count {
                let srcBuf = src[i]
                let dstBuf = dst[i]
                // dstBuf.mDataByteSize tracks pcmBuffer.frameLength, so this appends after the frames already copied
                memcpy(dstBuf.mData?.advanced(by: Int(dstBuf.mDataByteSize)), srcBuf.mData, Int(srcBuf.mDataByteSize))
            }
            pcmBuffer.frameLength += frameCount
        }
        return pcmBuffer
    }
}
A more robust solution would probably read the sample rate and channel count and give the option to preserve them.
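For instance (a sketch of that idea, not part of the tested answer above): before setting kExtAudioFileProperty_ClientDataFormat inside convertedTo(_:), you could query the file's native format and build the client format from it:

// kExtAudioFileProperty_FileDataFormat reports the file's decoded native format
var fileFormat = AudioStreamBasicDescription()
var fileFormatSize = UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
status = ExtAudioFileGetProperty(eaf!, kExtAudioFileProperty_FileDataFormat, &fileFormatSize, &fileFormat)
guard status == noErr else { return nil }
// a float32 client format that preserves the source's sample rate and channel count
guard let preservedFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32,
                                          sampleRate: fileFormat.mSampleRate,
                                          channels: fileFormat.mChannelsPerFrame,
                                          interleaved: false) else { return nil }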
Tested using:
let url = URL(fileURLWithPath: "/tmp/test.mp3")
let data = try! Data(contentsOf: url)
let format = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)!
if let d = data.convertedTo(format) {
    let avf = try! AVAudioFile(forWriting: URL(fileURLWithPath: "/tmp/foo.wav"),
                               settings: format.settings,
                               commonFormat: format.commonFormat,
                               interleaved: format.isInterleaved)
    try! avf.write(from: d)
}

AVAudioEngine, using 3D Audio

I would like to use AVAudioEngine for a 3D audio effect where the sound source circles the user's head. The source appears to move from left to right, but I've been unable to figure out how to make it circle the user's head. (The audio source must be mono or it can't work in 3D.)
I don't understand AVAudioMake3DVectorOrientation and AVAudioMake3DAngularOrientation.
I thought my math was correct, but I suspect it isn't, since otherwise I would have gotten the results I was looking for.
This is bare bones, so there isn't much error checking.
Would someone provide guidance to get me on track?
Thank you,
W.
import AVFoundation

class ThreeDAudio {
    var _angleIndx = 0.0
    var _engine: AVAudioEngine!
    var _player: AVAudioPlayerNode!
    var _environment: AVAudioEnvironmentNode!
    var _circleTimer: Timer!

    func initTimer() {
        _circleTimer = Timer.scheduledTimer(timeInterval: 0.10, target: self,
                                            selector: #selector(ThreeDAudio.updatePosition), userInfo: nil, repeats: true)
    }

    @objc func updatePosition() {
        let centerX = 0.0
        let centerY = 0.0
        let radius = 10.0
        let degToRads = Double.pi / 180.0
        let angle = _angleIndx * degToRads
        let x = centerX + sin(angle) * radius
        let y = centerY + cos(angle) * radius
        let z = 0.0
        let posInSpace = AVAudioMake3DPoint(Float(x), Float(y), Float(z))
        _angleIndx += 1.0
        _player.position = posInSpace
        print("angle: \(_angleIndx) , \(posInSpace)")
        if _angleIndx == 360.0 { _circleTimer.invalidate() }
    }

    func getBufferFromFileInBundle(fileName: String, fileType: String) -> AVAudioPCMBuffer? {
        // audio MUST be a monaural source or it can't work in 3D
        var file: AVAudioFile
        var audioBuffer: AVAudioPCMBuffer? = nil
        let path = Bundle.main.path(forResource: fileName, ofType: fileType)!
        do {
            file = try AVAudioFile(forReading: URL(fileURLWithPath: path))
            audioBuffer = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity: AVAudioFrameCount(file.length))
            try file.read(into: audioBuffer!)
        } catch let error as NSError {
            print("Error AVAudioFile:\(error)")
        }
        return audioBuffer
    }

    func outputFormat() -> AVAudioFormat {
        let outputFormat = _engine.outputNode.outputFormat(forBus: 0)
        let nChannels = outputFormat.channelCount // testing, will always be 2 channels
        let sampleRate = outputFormat.sampleRate
        return AVAudioFormat(standardFormatWithSampleRate: sampleRate, channels: nChannels)
    }

    func setupEngine(_ audioBuffer: AVAudioPCMBuffer) {
        _engine = AVAudioEngine()
        _player = AVAudioPlayerNode()
        _environment = AVAudioEnvironmentNode()
        _player.renderingAlgorithm = .HRTF
        _engine.attach(_player)
        _engine.attach(_environment)
        _engine.connect(_player, to: _environment, format: audioBuffer.format)
        _engine.connect(_environment, to: _engine.outputNode, format: outputFormat())
        _environment.listenerPosition = AVAudioMake3DPoint(0.0, 0.0, 0.0)
        _environment.listenerVectorOrientation = AVAudioMake3DVectorOrientation(AVAudioMake3DVector(0, 0, -1), AVAudioMake3DVector(0, 0, 0))
        _environment.listenerAngularOrientation = AVAudioMake3DAngularOrientation(0.0, 0.0, 0.0)
        do {
            try _engine.start()
        } catch let error as NSError {
            print("Error start:\(error)")
        }
    }

    func startAudioTest() {
        var thisFile = (name: "", fileType: "")
        thisFile = (name: "sound_voices", fileType: "wav")
        thisFile = (name: "Bouncing-Ball-MONO", fileType: "aiff")
        let audioBuffer = getBufferFromFileInBundle(fileName: thisFile.name, fileType: thisFile.fileType)
        if audioBuffer != nil {
            setupEngine(audioBuffer!)
            initTimer()
            _player.scheduleBuffer(audioBuffer!, at: nil, options: .loops, completionHandler: nil)
            _player.play()
        }
    }
}
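Two observations that may explain what you're seeing (my notes, not a confirmed answer). In AVAudioEnvironmentNode's right-handed coordinate space, +x is to the listener's right, +y is up, and -z is straight ahead, so the loop above, which varies x and y, sweeps the source through a vertical circle (left/right and overhead) rather than around the head; elevation cues are much weaker than azimuth cues, which is why it only reads as left-to-right motion. Circling the head horizontally means varying x and z. Also, the up vector in listenerVectorOrientation should be (0, 1, 0); (0, 0, 0) is degenerate. A sketch of updatePosition under those assumptions:

@objc func updatePosition() {
    let radius = 10.0
    let angle = _angleIndx * Double.pi / 180.0
    // orbit in the horizontal x/z plane around the listener at the origin
    let x = sin(angle) * radius   // left/right
    let z = -cos(angle) * radius  // starts in front (-z) and wraps behind
    _player.position = AVAudioMake3DPoint(Float(x), 0.0, Float(z))
    _angleIndx += 1.0
    if _angleIndx >= 360.0 { _circleTimer.invalidate() }
}

In setupEngine, the matching orientation would be AVAudioMake3DVectorOrientation(AVAudioMake3DVector(0, 0, -1), AVAudioMake3DVector(0, 1, 0)).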

AudioQueueStart, but no sound

import Foundation
import AudioToolbox

class AudioPlay {
    // number of buffers
    static let knumberBuffers = 3
    var aqData = AQPlayerState.init()

    // a custom structure for a playback audio queue
    class AQPlayerState {
        var mDataFormat = AudioStreamBasicDescription()
        var mQueue: AudioQueueRef?
        var mBuffers = [AudioQueueBufferRef?].init(repeating: nil, count: AudioPlay.knumberBuffers)
        var mAudioFile: AudioFileID?
        var bufferByteSize = UInt32()
        var mCurrentPacket: Int64?
        var mNumPacketsToRead = UInt32()
        var mPacketDescs: UnsafeMutablePointer<AudioStreamPacketDescription>?
        var mIsRunning = false
    }

    // playback audio queue callback
    static let HandleOutputBuffer: AudioQueueOutputCallback = { (aqData1, inAQ, inBuffer) in
        var pAqData = (aqData1?.assumingMemoryBound(to: AQPlayerState.self).pointee)!
        guard pAqData.mIsRunning || pAqData.mQueue != nil else {
            print("audioplay is not running, exiting callback func")
            return
        }
        var numBytesReadFromFile = UInt32()
        var numPackets = pAqData.mNumPacketsToRead
        AudioFileReadPacketData(pAqData.mAudioFile!, false, &numBytesReadFromFile,
                                pAqData.mPacketDescs, pAqData.mCurrentPacket!,
                                &numPackets, inBuffer.pointee.mAudioData)
        if numPackets > 0 {
            inBuffer.pointee.mAudioDataByteSize = numBytesReadFromFile
            AudioQueueEnqueueBuffer(pAqData.mQueue!, inBuffer,
                                    ((pAqData.mPacketDescs != nil) ? numPackets : UInt32(0)),
                                    pAqData.mPacketDescs)
            pAqData.mCurrentPacket! += Int64(numPackets)
        } else {
            AudioQueueStop(pAqData.mQueue!, false)
            pAqData.mIsRunning = false
        }
    }

    // set the properties, create a new output queue, and start the audio queue
    func start() {
        let url = Bundle.main.url(forResource: "123", withExtension: "mp3")!
        let audioFileURL = url as CFURL
        print(audioFileURL)
        let result = AudioFileOpenURL(audioFileURL, .readPermission, 0, &aqData.mAudioFile)
        print(result)
        var dataFormatSize = UInt32(MemoryLayout.size(ofValue: aqData.mDataFormat))
        let result1 = AudioFileGetProperty(aqData.mAudioFile!, kAudioFilePropertyDataFormat, &dataFormatSize, &aqData.mDataFormat)
        // get file properties
        var maxPacketSize = UInt32()
        var propertySize = UInt32(MemoryLayout.size(ofValue: maxPacketSize))
        let result2 = AudioFileGetProperty(aqData.mAudioFile!, kAudioFilePropertyPacketSizeUpperBound, &propertySize, &maxPacketSize)
        // calculate and set the buffer size
        DeriveBufferSize(ASBDesc: aqData.mDataFormat, maxPacketSize: maxPacketSize, seconds: 0.5,
                         outBufferSize: &aqData.bufferByteSize, outNumPacketsToRead: &aqData.mNumPacketsToRead)
        // check whether the format is VBR or CBR
        let isFormatVBR = aqData.mDataFormat.mBytesPerPacket == 0 || aqData.mDataFormat.mFramesPerPacket == 0
        if isFormatVBR {
            aqData.mPacketDescs = UnsafeMutablePointer<AudioStreamPacketDescription>.allocate(capacity: MemoryLayout.size(ofValue: AudioStreamPacketDescription()))
        } else {
            aqData.mPacketDescs = nil
        }
        // create a new audio queue
        let result4 = AudioQueueNewOutput(&aqData.mDataFormat, AudioPlay.HandleOutputBuffer, &aqData,
                                          CFRunLoopGetCurrent(), CFRunLoopMode.commonModes.rawValue, 0, &aqData.mQueue)
        // queue start
        aqData.mIsRunning = true
        // allocate memory buffers and prime them
        aqData.mCurrentPacket = 0
        for i in 0..<AudioPlay.knumberBuffers {
            AudioQueueAllocateBuffer(aqData.mQueue!, aqData.bufferByteSize, &aqData.mBuffers[i])
            AudioPlay.HandleOutputBuffer(&aqData, aqData.mQueue!, (aqData.mBuffers[i])!)
        }
        // start the audio queue
        AudioQueueStart(aqData.mQueue!, nil)
        repeat {
            CFRunLoopRunInMode(CFRunLoopMode.defaultMode, 0.25, false)
        } while aqData.mIsRunning
        CFRunLoopRunInMode(CFRunLoopMode.defaultMode, 1, false)
    }

    // calculate and set the buffer size
    func DeriveBufferSize(ASBDesc: AudioStreamBasicDescription, maxPacketSize: UInt32, seconds: Float64,
                          outBufferSize: UnsafeMutablePointer<UInt32>, outNumPacketsToRead: UnsafeMutablePointer<UInt32>) {
        let maxBufferSize: UInt32 = 0x50000
        let minBufferSize: UInt32 = 0x4000
        if ASBDesc.mFramesPerPacket != 0 {
            let numPacketsForTime = ASBDesc.mSampleRate / Float64(ASBDesc.mFramesPerPacket) * seconds
            outBufferSize.pointee = UInt32(numPacketsForTime) * maxPacketSize
        } else {
            outBufferSize.pointee = (maxBufferSize > maxPacketSize) ? maxBufferSize : maxPacketSize
        }
        if outBufferSize.pointee > maxBufferSize && outBufferSize.pointee > maxPacketSize {
            outBufferSize.pointee = maxBufferSize
        } else if outBufferSize.pointee < minBufferSize {
            outBufferSize.pointee = minBufferSize
        }
        outNumPacketsToRead.pointee = outBufferSize.pointee / maxPacketSize
    }

    // dispose of the audio queue
    func Dispose() {
        AudioQueueDispose(aqData.mQueue!, true)
        AudioFileClose(aqData.mAudioFile!)
        free(aqData.mPacketDescs)
    }
}
The code above was written following the Audio Queue Services Programming Guide.
I create an instance of this class and call its start() func.
It compiles fine, but no sound plays.
I have checked the code many times but made no progress.
Can anyone familiar with Audio Queue Services help me?
Any help will be appreciated.
Replacing "AudioFileReadPacketData" with "AudioFileReadPackets" fixed this problem!
But sometimes I now get a new problem like the one below; other times it works well:
Stream Audio(20535,0x1085ac3c0) malloc: *** error for object 0x6080001f6300: Invalid pointer dequeued from free list
*** set a breakpoint in malloc_error_break to debug
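A note on the likely root cause (mine, not from the original thread): AudioFileReadPacketData's ioNumBytes parameter is in/out; on input it must contain the capacity of the destination buffer, and on output it reports the bytes actually read. The callback above passes UInt32() (zero), so zero bytes are requested and nothing plays. The deprecated AudioFileReadPackets treats that parameter as output-only, which is why swapping it in appears to help. The fix inside HandleOutputBuffer would be:

// in: capacity of inBuffer; out: number of bytes actually read
var numBytesReadFromFile = pAqData.bufferByteSize
var numPackets = pAqData.mNumPacketsToRead
AudioFileReadPacketData(pAqData.mAudioFile!, false, &numBytesReadFromFile,
                        pAqData.mPacketDescs, pAqData.mCurrentPacket!,
                        &numPackets, inBuffer.pointee.mAudioData)

The intermittent malloc error may come from the mPacketDescs allocation: its capacity is MemoryLayout.size(ofValue: AudioStreamPacketDescription()) (a byte count, not a packet count) rather than Int(aqData.mNumPacketsToRead), so a read that returns more packet descriptions than that can overrun the allocation.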

Why can't I iterate through the full array in Metal

I have the following code, which I think allocates two 16 KB arrays of memory, fills one with 7s and the other with 4s.
I then run the code and find that only the first 511 items get added together. I'm 80% certain it has to do with the size of the threadgroups/thread counts. Any ideas?
func test() {
    var (device, commandQueue, defaultLibrary, commandBuffer, computeCommandEncoder) = initMetal()
    let alignment: Int = 0x4000
    var xpointer: UnsafeMutableRawPointer? = nil
    var ypointer: UnsafeMutableRawPointer? = nil
    let numberOfFloats = 4096 //56
    let numberOfBytes: Int = 16384
    let retx = posix_memalign(&xpointer, alignment, numberOfBytes)
    if retx != noErr {
        let err = String(validatingUTF8: strerror(retx)) ?? "unknown error"
        fatalError("Unable to allocate aligned memory: \(err).")
    }
    let rety = posix_memalign(&ypointer, alignment, numberOfBytes)
    if rety != noErr {
        let err = String(validatingUTF8: strerror(rety)) ?? "unknown error"
        fatalError("Unable to allocate aligned memory: \(err).")
    }
    let datax = xpointer!.bindMemory(to: Float.self, capacity: numberOfFloats)
    for index in 0..<numberOfFloats {
        datax[index] = 7.0
    }
    let datay = ypointer!.bindMemory(to: Float.self, capacity: numberOfFloats)
    for index in 0..<numberOfFloats {
        datay[index] = 4.0
    }
    kernelFunction = defaultLibrary.makeFunction(name: "sigmoid")
    do {
        pipelineState = try device.makeComputePipelineState(function: kernelFunction!)
    } catch {
        fatalError("Unable to create pipeline state")
    }
    let startTime = CFAbsoluteTimeGetCurrent()
    computeCommandEncoder.setComputePipelineState(pipelineState)
    var xvectorBufferNoCopy = device.makeBuffer(bytesNoCopy: xpointer!, length: numberOfBytes, options: [], deallocator: nil)
    computeCommandEncoder.setBuffer(xvectorBufferNoCopy, offset: 0, at: 0)
    var yvectorBufferNoCopy = device.makeBuffer(bytesNoCopy: ypointer!, length: numberOfBytes, options: [], deallocator: nil)
    computeCommandEncoder.setBuffer(yvectorBufferNoCopy, offset: 0, at: 1)
    var threadgroupCounts = MTLSize(width: 32, height: 1, depth: 1)
    var threadgroups = MTLSize(width: 1024, height: 1, depth: 1)
    computeCommandEncoder.dispatchThreadgroups(threadgroups, threadsPerThreadgroup: threadgroupCounts)
    computeCommandEncoder.endEncoding()
    commandBuffer.commit()
    commandBuffer.waitUntilCompleted()
    let timeElapsed = CFAbsoluteTimeGetCurrent() - startTime
    print("Time elapsed for \(title): \(timeElapsed) s")
    let data = ypointer!.bindMemory(to: Float.self, capacity: numberOfFloats)
    for index in 0..<numberOfFloats {
        print("\(index) - \(data[index])")
    }
}
kernel void addTest(const device float *inVector [[ buffer(0) ]],
                    device float *outVector [[ buffer(1) ]],
                    uint id [[ thread_position_in_grid ]]) {
    outVector[id] = inVector[id] + outVector[id];
}
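Two things stand out (my reading, not a verified answer). The dispatch launches 1024 threadgroups of 32 threads, i.e. 32,768 threads, against buffers that hold only 4,096 floats, so most thread ids index far past the end of the buffers, which is undefined behavior on the GPU. Also, the pipeline is built from a function named "sigmoid" while the kernel shown is addTest, so make sure you are dispatching the kernel you think you are. A sketch that matches the grid to the data (same variable names as your test()):

// build the pipeline from the kernel actually shown above
kernelFunction = defaultLibrary.makeFunction(name: "addTest")

// one thread per float: 4096 / 32 = 128 threadgroups of 32 threads
let threadgroupCounts = MTLSize(width: 32, height: 1, depth: 1)
let threadgroups = MTLSize(width: numberOfFloats / threadgroupCounts.width, height: 1, depth: 1)
computeCommandEncoder.dispatchThreadgroups(threadgroups, threadsPerThreadgroup: threadgroupCounts)

If the element count isn't a multiple of the threadgroup width, round the grid up and pass the count into the kernel so it can return early when id >= count.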